Fix type annotations and make MyPy run on torch/ (#36584)

Summary:
This PR fixes a couple of syntax errors in `torch/` that prevent MyPy from running, fixes simple type annotation errors (e.g. missing `from typing import List, Tuple, Optional`), and adds granular ignores for errors in particular modules as well as for missing typing in third-party packages.

As a result, running `mypy` in the root dir of the repo now runs on:
- `torch/`
- `aten/src/ATen/function_wrapper.py` (the only file already covered in CI)

In CI this runs on GitHub Actions, job Lint, sub-job "quick-checks", task "MyPy typecheck". It should give (right now): `Success: no issues found in 329 source files`.

Here are the details of the original 855 errors from running `mypy torch` on current master (after fixing the couple of syntax errors that prevented `mypy` from completing a run):

<details>

```
torch/utils/tensorboard/_proto_graph.py:1: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.node_def_pb2'
torch/utils/tensorboard/_proto_graph.py:2: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.attr_value_pb2'
torch/utils/tensorboard/_proto_graph.py:3: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.tensor_shape_pb2'
torch/utils/backcompat/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C'
torch/for_onnx/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch.for_onnx.onnx'
torch/cuda/nvtx.py:2: error: Cannot find implementation or library stub for module named 'torch._C'
torch/utils/show_pickle.py:59: error: Name 'pickle._Unpickler' is not defined
torch/utils/show_pickle.py:113: error: "Type[PrettyPrinter]" has no attribute "_dispatch"
torch/utils/tensorboard/_onnx_graph.py:1: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.graph_pb2'
torch/utils/tensorboard/_onnx_graph.py:2: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.node_def_pb2'
torch/utils/tensorboard/_onnx_graph.py:3: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.versions_pb2'
torch/utils/tensorboard/_onnx_graph.py:4: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.attr_value_pb2'
torch/utils/tensorboard/_onnx_graph.py:5: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.tensor_shape_pb2'
torch/utils/tensorboard/_onnx_graph.py:9: error: Cannot find implementation or library stub for module named 'onnx'
torch/contrib/_tensorboard_vis.py:10: error: Cannot find implementation or library stub for module named 'tensorflow.core.util'
torch/contrib/_tensorboard_vis.py:11: error: Cannot find implementation or library stub for module named 'tensorflow.core.framework'
torch/contrib/_tensorboard_vis.py:12: error: Cannot find implementation or library stub for module named 'tensorflow.python.summary.writer.writer'
torch/utils/hipify/hipify_python.py:43: error: Need type annotation for 'CAFFE2_TEMPLATE_MAP' (hint: "CAFFE2_TEMPLATE_MAP: Dict[<type>, <type>] = ...")
torch/utils/hipify/hipify_python.py:636: error: "object" has no attribute "items"
torch/nn/_reduction.py:27: error: Name 'Optional' is not defined
torch/nn/_reduction.py:27: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/_reduction.py:47: error: Name 'Optional' is not defined
torch/nn/_reduction.py:47: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/utils/tensorboard/_utils.py:17: error: Skipping analyzing 'matplotlib.pyplot': found module but no type hints or library stubs
torch/utils/tensorboard/_utils.py:17: error: Skipping analyzing 'matplotlib': found module but no type hints or library stubs
torch/utils/tensorboard/_utils.py:18: error: Skipping analyzing 'matplotlib.backends.backend_agg': found module but no type hints or library stubs
torch/utils/tensorboard/_utils.py:18: error: Skipping analyzing 'matplotlib.backends': found module but no type hints or library stubs
torch/nn/modules/utils.py:27: error: Name 'List' is not defined
torch/nn/modules/utils.py:27: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List")
caffe2/proto/caffe2_pb2.py:17: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"?
caffe2/proto/caffe2_pb2.py:25: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/caffe2_pb2.py:31: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:35: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:39: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:43: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:47: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:51: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:55: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:59: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:63: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:67: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:71: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:75: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/caffe2_pb2.py:108: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:112: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:124: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/caffe2_pb2.py:130: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:134: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:138: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:142: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:146: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:150: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:154: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:158: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:162: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:166: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:170: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:174: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:178: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:182: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:194: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/caffe2_pb2.py:200: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:204: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:208: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:212: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:224: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/caffe2_pb2.py:230: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:234: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:238: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:242: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:246: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:250: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:254: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/caffe2_pb2.py:267: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:274: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:281: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:288: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:295: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:302: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:327: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:334: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:341: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:364: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:371: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:378: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:385: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:392: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:399: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:406: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:413: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:420: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:427: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:434: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:441: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:448: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:455: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:462: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:488: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:495: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:502: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:509: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:516: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:523: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:530: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:537: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:544: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:551: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:558: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:565: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:572: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:596: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:603: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:627: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:634: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:641: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:648: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:655: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:662: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:686: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:693: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:717: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:724: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:731: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:738: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:763: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:770: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:777: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:784: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:808: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:815: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:822: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:829: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:836: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:843: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:850: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:857: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:864: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:871: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:878: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:885: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:892: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:916: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:923: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:930: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:937: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:944: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:951: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:958: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:982: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:989: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:996: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1003: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1010: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1017: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1024: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1031: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1038: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1045: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1052: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1059: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1066: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1090: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1097: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1104: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1128: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1135: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1142: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1166: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1173: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1180: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1187: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1194: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1218: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1225: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1232: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1239: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1246: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1253: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1260: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1267: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1274: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1281: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1305: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1312: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1319: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1326: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1333: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1340: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1347: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1354: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1361: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1368: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1375: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1382: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1389: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1396: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1420: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1427: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1434: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1441: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1465: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1472: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1479: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1486: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1493: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1500: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1507: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1514: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1538: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/caffe2_pb2.py:1545: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1552: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1559: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1566: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/caffe2_pb2.py:1667: error: "GeneratedProtocolMessageType" has no attribute "Segment"
torch/multiprocessing/queue.py:4: error: No library stub file for standard library module 'multiprocessing.reduction'
caffe2/proto/torch_pb2.py:18: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"?
caffe2/proto/torch_pb2.py:27: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor"
caffe2/proto/torch_pb2.py:33: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor"
caffe2/proto/torch_pb2.py:50: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:57: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:81: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:88: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:95: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:109: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:116: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:123: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:130: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:137: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:144: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:151: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:175: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:182: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:189: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:196: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:220: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:227: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:234: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:241: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:265: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:272: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:279: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:286: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:293: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:300: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:307: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:314: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:321: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:328: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:335: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:342: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:366: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:373: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:397: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/torch_pb2.py:404: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:411: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:418: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:425: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/torch_pb2.py:432: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:17: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"?
caffe2/proto/metanet_pb2.py:29: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:36: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:43: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:50: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:57: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:64: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:88: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:95: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:126: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:133: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:140: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:164: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:171: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:178: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:202: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:209: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:216: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:240: error: Unexpected keyword argument "serialized_options" for "Descriptor"
caffe2/proto/metanet_pb2.py:247: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:254: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:261: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:268: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:275: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:282: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:289: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/metanet_pb2.py:296: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor"
caffe2/proto/__init__.py:13: error: Skipping analyzing 'caffe2.caffe2.fb.session.proto': found module but no type hints or library stubs
torch/multiprocessing/pool.py:3: error: No library stub file for standard library module 'multiprocessing.util'
torch/multiprocessing/pool.py:3: note: (Stub files are from https://github.com/python/typeshed)
caffe2/python/scope.py:10: error: Skipping analyzing 'past.builtins': found module but no type hints or library stubs
caffe2/python/__init__.py:7: error: Module has no attribute "CPU"
caffe2/python/__init__.py:8: error: Module has no attribute "CUDA"
caffe2/python/__init__.py:9: error: Module has no attribute "MKLDNN"
caffe2/python/__init__.py:10: error: Module has no attribute "OPENGL"
caffe2/python/__init__.py:11: error: Module has no attribute "OPENCL"
caffe2/python/__init__.py:12: error: Module has no attribute "IDEEP"
caffe2/python/__init__.py:13: error: Module has no attribute "HIP"
caffe2/python/__init__.py:14: error: Module has no attribute "COMPILE_TIME_MAX_DEVICE_TYPES"; maybe "PROTO_COMPILE_TIME_MAX_DEVICE_TYPES"?
caffe2/python/__init__.py:15: error: Module has no attribute "ONLY_FOR_TEST"; maybe "PROTO_ONLY_FOR_TEST"?
caffe2/python/__init__.py:34: error: Item "_Loader" of "Optional[_Loader]" has no attribute "exec_module"
caffe2/python/__init__.py:34: error: Item "None" of "Optional[_Loader]" has no attribute "exec_module"
caffe2/python/__init__.py:35: error: Module has no attribute "cuda"
caffe2/python/__init__.py:37: error: Module has no attribute "cuda"
caffe2/python/__init__.py:49: error: Module has no attribute "add_dll_directory"
torch/random.py:4: error: Cannot find implementation or library stub for module named 'torch._C'
torch/_classes.py:2: error: Cannot find implementation or library stub for module named 'torch._C'
torch/onnx/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C'
torch/hub.py:21: error: Skipping analyzing 'tqdm.auto': found module but no type hints or library stubs
torch/hub.py:24: error: Skipping analyzing 'tqdm': found module but no type hints or library stubs
torch/hub.py:27: error: Name 'tqdm' already defined (possibly by an import)
torch/_tensor_str.py:164: error: Not all arguments converted during string formatting
torch/_ops.py:1: error: Cannot find implementation or library stub for module named 'torch._C'
torch/_linalg_utils.py:26: error: Name 'Optional' is not defined
torch/_linalg_utils.py:26: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_linalg_utils.py:26: error: Name 'Tensor' is not defined
torch/_linalg_utils.py:63: error: Name 'Tensor' is not defined
torch/_linalg_utils.py:63: error: Name 'Optional' is not defined
torch/_linalg_utils.py:63: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_linalg_utils.py:70: error: Name 'Optional' is not defined
torch/_linalg_utils.py:70: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_linalg_utils.py:70: error: Name 'Tensor' is not defined
torch/_linalg_utils.py:88: error: Name 'Tensor' is not defined
torch/_linalg_utils.py:88: error: Name 'Optional' is not defined
torch/_linalg_utils.py:88: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_linalg_utils.py:88: error: Name 'Tuple' is not defined
torch/_linalg_utils.py:88: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/_jit_internal.py:17: error: Need type annotation for 'boolean_dispatched'
torch/_jit_internal.py:474: error: Need type annotation for '_overloaded_fns' (hint: "_overloaded_fns: Dict[<type>, <type>] = ...")
torch/_jit_internal.py:512: error: Need type annotation for '_overloaded_methods' (hint: "_overloaded_methods: Dict[<type>, <type>] = ...")
torch/_jit_internal.py:648: error: Incompatible types in assignment (expression has type "FinalCls", variable has type "_SpecialForm")
torch/sparse/__init__.py:11: error: Name 'Tensor' is not defined
torch/sparse/__init__.py:71: error: Name 'Tensor' is not defined
torch/sparse/__init__.py:71: error: Name 'Optional' is not defined
torch/sparse/__init__.py:71: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/sparse/__init__.py:71: error: Name 'Tuple' is not defined
torch/sparse/__init__.py:71: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/nn/init.py:109: error: Name 'Tensor' is not defined
torch/nn/init.py:126: error: Name 'Tensor' is not defined
torch/nn/init.py:142: error: Name 'Tensor' is not defined
torch/nn/init.py:165: error: Name 'Tensor' is not defined
torch/nn/init.py:180: error: Name 'Tensor' is not defined
torch/nn/init.py:194: error: Name 'Tensor' is not defined
torch/nn/init.py:287: error: Name 'Tensor' is not defined
torch/nn/init.py:315: error: Name 'Tensor' is not defined
torch/multiprocessing/reductions.py:8: error: No library stub file for standard library module 'multiprocessing.util'
torch/multiprocessing/reductions.py:9: error: No library stub file for standard library module 'multiprocessing.reduction'
torch/multiprocessing/reductions.py:17: error: No library stub file for standard library module 'multiprocessing.resource_sharer'
torch/jit/_builtins.py:72: error: Module has no attribute "_no_grad_embedding_renorm_"
torch/jit/_builtins.py:80: error: Module has no attribute "stft"
torch/jit/_builtins.py:81: error: Module has no attribute "cdist"
torch/jit/_builtins.py:82: error: Module has no attribute "norm"
torch/jit/_builtins.py:83: error: Module has no attribute "nuclear_norm"
torch/jit/_builtins.py:84: error: Module has no attribute "frobenius_norm"
torch/backends/cudnn/__init__.py:8: error: Cannot find implementation or library stub for module named 'torch._C'
torch/backends/cudnn/__init__.py:86: error: Need type annotation for '_handles' (hint: "_handles: Dict[<type>, <type>] = ...")
torch/autograd/profiler.py:13: error: Name 'ContextDecorator' already defined (possibly by an import)
torch/autograd/function.py:2: error: Cannot find implementation or library stub for module named 'torch._C'
torch/autograd/function.py:2: note: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
torch/autograd/function.py:109: error: Unsupported dynamic base class "with_metaclass"
torch/serialization.py:609: error: "Callable[[Any], Any]" has no attribute "cache"
torch/_lowrank.py:11: error: Name 'Tensor' is not defined
torch/_lowrank.py:13: error: Name 'Optional' is not defined
torch/_lowrank.py:13: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_lowrank.py:14: error: Name 'Optional' is not defined
torch/_lowrank.py:14: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_lowrank.py:14: error: Name 'Tensor' is not defined
torch/_lowrank.py:82: error: Name 'Tensor' is not defined
torch/_lowrank.py:82: error: Name 'Optional' is not defined
torch/_lowrank.py:82: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_lowrank.py:82: error: Name 'Tuple' is not defined
torch/_lowrank.py:82: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/_lowrank.py:130: error: Name 'Tensor' is not defined
torch/_lowrank.py:130: error: Name 'Optional' is not defined
torch/_lowrank.py:130: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_lowrank.py:130: error: Name 'Tuple' is not defined
torch/_lowrank.py:130: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/_lowrank.py:167: error: Name 'Tensor' is not defined
torch/_lowrank.py:167: error: Name 'Optional' is not defined
torch/_lowrank.py:167: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/_lowrank.py:167: error: Name 'Tuple' is not defined
torch/_lowrank.py:167: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/quantization/observer.py:45: error: Variable "torch.quantization.observer.ABC" is not valid as a type
torch/quantization/observer.py:45: note: See https://mypy.readthedocs.io/en/latest/common_issues.html#variables-vs-type-aliases
torch/quantization/observer.py:45: error: Invalid base class "ABC"
torch/quantization/observer.py:127: error: Name 'Tensor' is not defined
torch/quantization/observer.py:127: error: Name 'Tuple' is not defined
torch/quantization/observer.py:127: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/quantization/observer.py:172: error: Module has no attribute "per_tensor_symmetric"
torch/quantization/observer.py:172: error: Module has no attribute "per_channel_symmetric"
torch/quantization/observer.py:192: error: Name 'Tensor' is not defined
torch/quantization/observer.py:192: error: Name 'Tuple' is not defined
torch/quantization/observer.py:192: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/quantization/observer.py:233: error: Module has no attribute "per_tensor_symmetric"
torch/quantization/observer.py:233: error: Module has no attribute "per_channel_symmetric"
torch/quantization/observer.py:534: error: Name 'Tensor' is not defined
torch/quantization/observer.py:885: error: Name 'Tensor' is not defined
torch/quantization/observer.py:885: error: Name 'Tuple' is not defined
torch/quantization/observer.py:885: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/quantization/observer.py:894: error: Cannot determine type of 'max_val'
torch/quantization/observer.py:894: error: Cannot determine type of 'min_val'
torch/quantization/observer.py:899: error: Cannot determine type of 'min_val'
torch/quantization/observer.py:902: error: Name 'Tensor' is not defined
torch/quantization/observer.py:925: error: Name 'Tensor' is not defined
torch/quantization/observer.py:928: error: Cannot determine type of 'min_val'
torch/quantization/observer.py:929: error: Cannot determine type of 'max_val'
torch/quantization/observer.py:946: error: Argument "min" to "histc" has incompatible type "Tuple[Tensor, Tensor]"; expected "Union[int, float, bool]"
torch/quantization/observer.py:946: error: Argument "max" to "histc" has incompatible type "Tuple[Tensor, Tensor]"; expected "Union[int, float, bool]"
torch/quantization/observer.py:1056: error: Module has no attribute "per_tensor_symmetric"
torch/quantization/observer.py:1058: error: Module has no attribute "per_channel_symmetric"
torch/nn/quantized/functional.py:76: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:76: error: Name 'BroadcastingList2' is not defined
torch/nn/quantized/functional.py:259: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:259: error: Name 'Optional' is not defined
torch/nn/quantized/functional.py:259: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/quantized/functional.py:289: error: Module has no attribute "ops"
torch/nn/quantized/functional.py:290: error: Module has no attribute "ops"
torch/nn/quantized/functional.py:308: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:326: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:356: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:371: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:400: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:400: error: Name 'Optional' is not defined
torch/nn/quantized/functional.py:400: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/quantized/functional.py:430: error: Name 'Tensor' is not defined
torch/nn/quantized/functional.py:448: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/linear.py:26: error: Module has no attribute "ops"
torch/nn/quantized/modules/linear.py:28: error: Module has no attribute "ops"
torch/nn/quantized/modules/functional_modules.py:40: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:47: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:54: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:61: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:68: error: Name 'List' is not defined
torch/nn/quantized/modules/functional_modules.py:68: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List")
torch/nn/quantized/modules/functional_modules.py:68: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:75: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:140: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:146: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:151: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:157: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:162: error: Name 'List' is not defined
torch/nn/quantized/modules/functional_modules.py:162: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List")
torch/nn/quantized/modules/functional_modules.py:162: error: Name 'Tensor' is not defined
torch/nn/quantized/modules/functional_modules.py:168: error: Name 'Tensor' is not defined
torch/multiprocessing/spawn.py:9: error: Module 'torch.multiprocessing' has no attribute '_prctl_pr_set_pdeathsig'
torch/multiprocessing/__init__.py:28: error: Module has no attribute "__all__"
torch/jit/frontend.py:9: error: Cannot find implementation or library stub for module named 'torch._C._jit_tree_views'
torch/jit/annotations.py:6: error: Module 'torch._jit_internal' has no attribute 'BroadcastingList2'; maybe "BroadcastingList1" or "BroadcastingListCls"?
torch/jit/annotations.py:6: error: Module 'torch._jit_internal' has no attribute 'BroadcastingList3'; maybe "BroadcastingList1" or "BroadcastingListCls"?
torch/jit/annotations.py:9: error: Cannot find implementation or library stub for module named 'torch._C'
torch/distributions/distribution.py:16: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...")
torch/distributions/distribution.py:74: error: Name 'arg_constraints' already defined on line 16
torch/distributions/distribution.py:84: error: Name 'support' already defined on line 15
torch/functional.py:114: error: Name 'Tuple' is not defined
torch/functional.py:114: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/functional.py:114: error: Name 'Optional' is not defined
torch/functional.py:114: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:189: error: Incompatible types in assignment (expression has type "None", variable has type "Tensor")
torch/functional.py:200: error: Argument 1 to "_indices_product" has incompatible type "Tuple[int, ...]"; expected "List[int]"
torch/functional.py:204: error: No overload variant of "__setitem__" of "list" matches argument types "Tensor", "int"
torch/functional.py:204: note: Possible overload variants:
torch/functional.py:204: note:     def __setitem__(self, int, int) -> None
torch/functional.py:204: note:     def __setitem__(self, slice, Iterable[int]) -> None
torch/functional.py:204: error: No overload variant of "__getitem__" of "list" matches argument type "Tensor"
torch/functional.py:204: note:     def __getitem__(self, int) -> int
torch/functional.py:204: note:     def __getitem__(self, slice) -> List[int]
torch/functional.py:207: error: "Tensor" has no attribute "copy_"
torch/functional.py:212: error: No overload variant of "__setitem__" of "list" matches argument types "Tensor", "int"
torch/functional.py:212: note: Possible overload variants:
torch/functional.py:212: note:     def __setitem__(self, int, int) -> None
torch/functional.py:212: note:     def __setitem__(self, slice, Iterable[int]) -> None
torch/functional.py:212: error: No overload variant of "__getitem__" of "list" matches argument type "Tensor"
torch/functional.py:212: note:     def __getitem__(self, int) -> int
torch/functional.py:212: note:     def __getitem__(self, slice) -> List[int]
torch/functional.py:215: error: Incompatible types in assignment (expression has type "None", variable has type "Tensor")
torch/functional.py:334: error: Name 'Optional' is not defined
torch/functional.py:334: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:429: error: Argument 2 to "pad" has incompatible type "Tuple[int, int]"; expected "List[int]"
torch/functional.py:431: error: Module has no attribute "stft"
torch/functional.py:766: error: Module has no attribute "cdist"
torch/functional.py:768: error: Module has no attribute "cdist"
torch/functional.py:770: error: Module has no attribute "cdist"
torch/functional.py:775: error: Name 'Optional' is not defined
torch/functional.py:775: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:780: error: Name 'Optional' is not defined
torch/functional.py:780: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:780: error: Name 'number' is not defined
torch/functional.py:780: error: Name 'norm' already defined on line 775
torch/functional.py:785: error: Name 'Optional' is not defined
torch/functional.py:785: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:785: error: Name 'number' is not defined
torch/functional.py:785: error: Name 'norm' already defined on line 775
torch/functional.py:790: error: Name 'Optional' is not defined
torch/functional.py:790: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:790: error: Name 'norm' already defined on line 775
torch/functional.py:795: error: Name 'norm' already defined on line 775
torch/functional.py:960: error: Name 'Any' is not defined
torch/functional.py:960: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Any")
torch/functional.py:960: error: Name 'Tuple' is not defined
torch/functional.py:960: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/functional.py:1036: error: Argument 1 to "len" has incompatible type "int"; expected "Sized"
torch/functional.py:1041: error: Name 'Optional' is not defined
torch/functional.py:1041: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:1041: error: Name 'Tuple' is not defined
torch/functional.py:1041: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/functional.py:1056: error: Name 'Optional' is not defined
torch/functional.py:1056: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/functional.py:1056: error: Name 'Tuple' is not defined
torch/functional.py:1056: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple")
torch/distributions/von_mises.py:87: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/negative_binomial.py:25: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/multivariate_normal.py:116: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/laplace.py:23: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/independent.py:34: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...")
torch/distributions/cauchy.py:28: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/poisson.py:28: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/one_hot_categorical.py:32: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None")
torch/distributions/normal.py:27: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/lowrank_multivariate_normal.py:79: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/gamma.py:30: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/exponential.py:23: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/fishersnedecor.py:25: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/dirichlet.py:44: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None")
torch/nn/quantized/dynamic/modules/rnn.py:230: error: Incompatible types in assignment (expression has type "int", variable has type "Tensor")
torch/nn/quantized/dynamic/modules/rnn.py:232: error: Incompatible types in assignment (expression has type "int", variable has type "Tensor")
torch/nn/quantized/dynamic/modules/rnn.py:236: error: Incompatible return value type (got "Tuple[Any, Tensor, Any]", expected "Tuple[int, int, int]")
torch/nn/quantized/dynamic/modules/rnn.py:351: error: Incompatible types in assignment (expression has type "Type[LSTM]", base class "RNNBase" defined the type as "Type[RNNBase]")
torch/nn/quantized/dynamic/modules/rnn.py:381: error: Module has no attribute "quantized_lstm"
torch/nn/quantized/dynamic/modules/rnn.py:385: error: Module has no attribute "quantized_lstm"
torch/nn/quantized/dynamic/modules/rnn.py:414: error: Argument 1 to "forward_impl" of "LSTM" has incompatible type "PackedSequence"; expected "Tensor"
torch/nn/quantized/dynamic/modules/rnn.py:416: error: Incompatible types in assignment (expression has type "PackedSequence", variable has type "Tensor")
torch/nn/quantized/dynamic/modules/rnn.py:418: error: Incompatible return value type (got "Tuple[Tensor, Tuple[Tensor, Tensor]]", expected "Tuple[PackedSequence, Tuple[Tensor, Tensor]]")
torch/nn/quantized/dynamic/modules/rnn.py:420: error: Argument 1 of "permute_hidden" is incompatible with supertype "RNNBase"; supertype defines the argument type as "Tensor"
torch/nn/quantized/dynamic/modules/rnn.py:420: error: Return type "Tuple[Tensor, Tensor]" of "permute_hidden" incompatible with return type "Tensor" in supertype "RNNBase"
torch/nn/quantized/dynamic/modules/rnn.py:426: error: Argument 2 of "check_forward_args" is incompatible with supertype "RNNBase"; supertype defines the argument type as "Tensor"
torch/nn/intrinsic/qat/modules/conv_fused.py:232: error: Incompatible types in assignment (expression has type "Type[ConvBnReLU2d]", base class "ConvBn2d" defined the type as "Type[ConvBn2d]")
torch/distributions/beta.py:27: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None")
torch/distributions/geometric.py:31: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/continuous_bernoulli.py:38: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None")
torch/distributions/bernoulli.py:30: error: Incompatible types in assignment (expression has type "_Boolean", base class "Distribution" defined the type as "None")
torch/quantization/fake_quantize.py:126: error: Module has no attribute "per_tensor_symmetric"
torch/quantization/fake_quantize.py:132: error: Module has no attribute "per_channel_symmetric"
torch/distributions/transformed_distribution.py:41: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...")
torch/jit/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C'
torch/jit/__init__.py:15: error: Module 'torch.utils' has no attribute 'set_module'
torch/jit/__init__.py:70: error: Name 'Attribute' already defined on line 68
torch/jit/__init__.py:213: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior
torch/jit/__init__.py:215: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior
torch/jit/__init__.py:1524: error: Unsupported dynamic base class "with_metaclass"
torch/jit/__init__.py:1869: error: Name 'ScriptModule' already defined on line 1524
torch/jit/__init__.py:1998: error: Need type annotation for '_jit_caching_layer'
torch/jit/__init__.py:1999: error: Need type annotation for '_jit_function_overload_caching'
torch/distributions/relaxed_categorical.py:34: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/relaxed_categorical.py:108: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None")
torch/distributions/relaxed_bernoulli.py:31: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/relaxed_bernoulli.py:114: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None")
torch/distributions/logistic_normal.py:31: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None")
torch/distributions/log_normal.py:26: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/half_normal.py:27: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/half_cauchy.py:28: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/gumbel.py:28: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/nn/quantized/modules/conv.py:18: error: Module 'torch.nn.utils' has no attribute 'fuse_conv_bn_weights'
torch/nn/quantized/modules/conv.py:209: error: Name 'Optional' is not defined
torch/nn/quantized/modules/conv.py:209: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/quantized/modules/conv.py:214: error: Module has no attribute "ops"
torch/nn/quantized/modules/conv.py:321: error: Name 'Optional' is not defined
torch/nn/quantized/modules/conv.py:321: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/quantized/modules/conv.py:323: error: Module has no attribute "ops"
torch/nn/quantized/modules/conv.py:447: error: Name 'Optional' is not defined
torch/nn/quantized/modules/conv.py:447: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional")
torch/nn/quantized/modules/conv.py:449: error: Module has no attribute "ops"
torch/nn/quantized/modules/conv.py:513: error: Name 'nn.modules.conv._ConvTransposeNd' is not defined
torch/nn/quantized/modules/conv.py:525: error: Name 'List' is not defined
torch/nn/quantized/modules/conv.py:525: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List")
torch/nn/quantized/modules/conv.py:527: error: Name 'List' is not defined
torch/nn/quantized/modules/conv.py:527: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List")
torch/nn/intrinsic/quantized/modules/conv_relu.py:8: error: Module 'torch.nn.utils' has no attribute 'fuse_conv_bn_weights'
torch/nn/intrinsic/quantized/modules/conv_relu.py:21: error: Incompatible types in assignment (expression has type "Type[ConvReLU2d]", base class "Conv2d" defined the type as "Type[Conv2d]")
torch/nn/intrinsic/quantized/modules/conv_relu.py:62: error: Incompatible types in assignment (expression has type "Type[ConvReLU3d]", base class "Conv3d" defined the type as "Type[Conv3d]")
torch/distributions/weibull.py:25: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None")
torch/distributions/kl.py:35: error: Need type annotation for '_KL_MEMOIZE' (hint: "_KL_MEMOIZE: Dict[<type>, <type>] = ...")
torch/distributions/studentT.py:27: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None")
torch/distributions/mixture_same_family.py:48: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...")
torch/distributions/__init__.py:158: error: Name 'transforms' is not defined
torch/onnx/utils.py:21: error: Cannot find implementation or library stub for module named 'torch._C'
torch/distributed/rendezvous.py:4: error: Cannot find implementation or library stub for module named 'urlparse'
torch/distributed/rendezvous.py:4: error: Name 'urlparse' already defined (possibly by an import)
torch/distributed/rendezvous.py:4: error: Name 'urlunparse' already defined (possibly by an import)
torch/distributed/rendezvous.py:9: error: Module 'torch.distributed' has no attribute 'FileStore'
torch/distributed/rendezvous.py:9: error: Module 'torch.distributed' has no attribute 'TCPStore'
torch/distributed/rendezvous.py:65: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllreduceOptions'; maybe "ReduceOptions" or "AllreduceCoalescedOptions"?
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllreduceCoalescedOptions'; maybe "AllreduceOptions"?
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllToAllOptions'
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'BroadcastOptions'
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'GatherOptions'; maybe "ScatterOptions"?
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ReduceOptions'; maybe "AllreduceOptions", "ReduceScatterOptions", or "ReduceOp"?
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ReduceScatterOptions'; maybe "ScatterOptions" or "ReduceOptions"?
torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ScatterOptions'; maybe "ReduceScatterOptions" or
Pull Request resolved: https://github.com/pytorch/pytorch/pull/36584

Reviewed By: seemethere, ailzhang

Differential Revision: D21155985

Pulled By: ezyang

fbshipit-source-id: f628d4293992576207167e7c417998fad15898d1
This commit is contained in:
Ralf Gommers 2020-04-22 14:14:28 -07:00 committed by Facebook GitHub Bot
parent e921cd222a
commit 78d5707041
63 changed files with 452 additions and 86 deletions

View File

@ -54,6 +54,7 @@ fi
# Test the package # Test the package
/builder/check_binary.sh /builder/check_binary.sh
# =================== The above code will be executed inside Docker container =================== # =================== The above code will be executed inside Docker container ===================
EOL EOL
echo echo

View File

@ -38,10 +38,6 @@ jobs:
- name: Ensure C++ source files are not executable - name: Ensure C++ source files are not executable
run: | run: |
(! find . \( -path ./third_party -o -path ./.git -o -path ./torch/bin -o -path ./build \) -prune -o -type f -executable -regextype posix-egrep -not -regex '.+(\.(bash|sh|py|so)|git-pre-commit|git-clang-format)$' -print | grep . || (echo 'The above files have executable permission; please remove their executable permission by using `chmod -x`'; false)) (! find . \( -path ./third_party -o -path ./.git -o -path ./torch/bin -o -path ./build \) -prune -o -type f -executable -regextype posix-egrep -not -regex '.+(\.(bash|sh|py|so)|git-pre-commit|git-clang-format)$' -print | grep . || (echo 'The above files have executable permission; please remove their executable permission by using `chmod -x`'; false))
- name: MyPy typecheck
run: |
pip install mypy mypy-extensions
mypy @mypy-files.txt
- name: C++ docs check - name: C++ docs check
run: | run: |
sudo apt-get install -y doxygen && pip install -r requirements.txt sudo apt-get install -y doxygen && pip install -r requirements.txt

View File

@ -65,7 +65,8 @@ if [[ "$BUILD_ENVIRONMENT" != *ppc64le* ]] && [[ "$BUILD_ENVIRONMENT" != *-bazel
pip_install --user tb-nightly pip_install --user tb-nightly
# mypy will fail to install on Python <3.4. In that case, # mypy will fail to install on Python <3.4. In that case,
# we just won't run these tests. # we just won't run these tests.
pip_install --user mypy || true # Pin MyPy version because new errors are likely to appear with each release
pip_install --user "mypy==0.770" || true
fi fi
# faulthandler become built-in since 3.3 # faulthandler become built-in since 3.3

View File

@ -20,6 +20,7 @@ import yaml
import argparse import argparse
import os import os
from copy import deepcopy from copy import deepcopy
from typing import Dict, List
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument("--template_dir", default=".", help="where template.h is") parser.add_argument("--template_dir", default=".", help="where template.h is")
@ -38,7 +39,7 @@ if args.aten_root:
sys.path.append(os.path.join(args.aten_root, 'src', 'ATen')) sys.path.append(os.path.join(args.aten_root, 'src', 'ATen'))
from code_template import CodeTemplate as CT from code_template import CodeTemplate as CT
else: else:
from src.ATen.code_template import CodeTemplate as CT from src.ATen.code_template import CodeTemplate as CT # type: ignore[import,no-redef]
OP_TEMPLATE = CT.from_file( OP_TEMPLATE = CT.from_file(
os.path.join(args.template_dir, 'aten_op_template.h')) os.path.join(args.template_dir, 'aten_op_template.h'))
@ -48,7 +49,7 @@ try:
# use faster C loader if available # use faster C loader if available
from yaml import CLoader as Loader from yaml import CLoader as Loader
except ImportError: except ImportError:
from yaml import Loader from yaml import Loader # type: ignore[misc]
def write(filename, s): def write(filename, s):
@ -228,7 +229,7 @@ if __name__ == '__main__':
top_env = { top_env = {
'mappings': [], 'mappings': [],
'implementations': [], 'implementations': [],
} } # type: Dict[str, List]
seen = set() seen = set()
key = 0 key = 0
for o in filtered: for o in filtered:

View File

@ -4,7 +4,7 @@ from __future__ import print_function
from __future__ import unicode_literals from __future__ import unicode_literals
import numpy as np import numpy as np
import cPickle as pickle import pickle
from collections import OrderedDict from collections import OrderedDict
from caffe2.proto import caffe2_pb2 from caffe2.proto import caffe2_pb2

View File

@ -115,7 +115,7 @@ def tensorboard_graphs(c2_netdef, tf_dir):
log.setLevel(logging.INFO) log.setLevel(logging.INFO)
def parse_net_def(path): def parse_net_def(path):
import google.protobuf.text_format import google.protobuf.text_format # type: ignore[import]
net_def = caffe2_pb2.NetDef() net_def = caffe2_pb2.NetDef()
with open(path) as f: with open(path) as f:
google.protobuf.text_format.Merge(f.read(), net_def) google.protobuf.text_format.Merge(f.read(), net_def)

View File

@ -8,7 +8,7 @@ import os
import tempfile import tempfile
import shutil import shutil
from caffe2.distributed.python import StoreHandlerTimeoutError from caffe2.distributed.python import StoreHandlerTimeoutError # type: ignore[import]
from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase from caffe2.python.test_util import TestCase

View File

@ -6,7 +6,7 @@ from __future__ import unicode_literals
import os import os
import uuid import uuid
from caffe2.distributed.python import StoreHandlerTimeoutError from caffe2.distributed.python import StoreHandlerTimeoutError # type: ignore[import]
from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase from caffe2.python.test_util import TestCase

View File

@ -84,7 +84,7 @@ import time
from caffe2.python import cnn, workspace, core from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order): def MLP(order):
@ -604,8 +604,7 @@ def Benchmark(model_gen, arg):
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w" "{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid: ) as fid:
fid.write(str(model.param_init_net.Proto())) fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model, with open("{0}.pbtxt".format(arg.model), "w") as fid:
arg.batch_size), "w") as fid:
fid.write(str(model.net.Proto())) fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.param_init_net)

View File

@ -244,8 +244,8 @@ def is_request_only_scalar(scalar):
return True return True
# Contains features accessed in a model layer of a given type # Contains features accessed in a model layer of a given type
# type: A string representing the kind of feature, consistent with FeatureSpec # `type`: A string representing the kind of feature, consistent with FeatureSpec
# ids: A set of feature IDs that are accessed in the model layer # `ids`: A set of feature IDs that are accessed in the model layer
AccessedFeatures = namedtuple("AccessedFeatures", ["type", "ids"]) AccessedFeatures = namedtuple("AccessedFeatures", ["type", "ids"])
class ModelLayer(object): class ModelLayer(object):

View File

@ -8,7 +8,7 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc, avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close, check_quantized_results_close,
) )

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close, check_quantized_results_close,
generate_conv_inputs, generate_conv_inputs,
generate_convnd_inputs, generate_convnd_inputs,

View File

@ -7,7 +7,10 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, utils, workspace from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close, run_conv_or_fc from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import assume, given from hypothesis import assume, given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close, check_quantized_results_close,
generate_conv_inputs, generate_conv_inputs,
generate_convnd_inputs, generate_convnd_inputs,

View File

@ -7,7 +7,10 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, utils, workspace from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close, run_conv_or_fc from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import assume, given from hypothesis import assume, given

View File

@ -5,9 +5,9 @@ import collections
import caffe2.python.hypothesis_test_util as hu import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.python.fb import hardcode_scale_zp from caffe2.python.fb import hardcode_scale_zp # type: ignore[import]
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close, check_quantized_results_close,
generate_conv_inputs, generate_conv_inputs,
run_conv_or_fc, run_conv_or_fc,

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -7,7 +7,10 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close, run_conv_or_fc from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import given from hypothesis import given

View File

@ -7,7 +7,7 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc, avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close, check_quantized_results_close,
run_conv_or_fc, run_conv_or_fc,

View File

@ -7,7 +7,7 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import ( from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc, avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close, check_quantized_results_close,
run_conv_or_fc, run_conv_or_fc,

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -7,7 +7,7 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, utils, workspace from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -2,7 +2,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
import numpy as np import numpy as np
from caffe2.python import core, workspace from caffe2.python import core, workspace
from caffe2.quantization.server import dnnlowp_pybind11 from caffe2.quantization.server import dnnlowp_pybind11 # type: ignore[attr-defined]
net = core.Net("test_net") net = core.Net("test_net")

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import assume, given from hypothesis import assume, given

View File

@ -6,7 +6,7 @@ import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, workspace from caffe2.python import core, dyndep, workspace
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -7,7 +7,7 @@ import hypothesis.strategies as st
import numpy as np import numpy as np
from caffe2.python import core, dyndep, utils, workspace from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given from hypothesis import given

View File

@ -6,7 +6,7 @@ from collections import defaultdict
import numpy as np import numpy as np
from caffe2.python import core, utils from caffe2.python import core, utils
from caffe2.python.fb import hardcode_scale_zp from caffe2.python.fb import hardcode_scale_zp # type: ignore[import]
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -7,5 +7,7 @@ To use it, install the following dependencies:
pip install mypy mypy-extensions pip install mypy mypy-extensions
# Run type checker in the pytorch/ directory # Run type checker in the pytorch/ directory
mypy @mypy-files.txt mypy
``` ```
Note that the minimum version of MyPy that is supported is 0.770

View File

@ -1 +0,0 @@
aten/src/ATen/function_wrapper.py

276
mypy.ini
View File

@ -1,2 +1,276 @@
# This is the PyTorch MyPy config file (note: don't change this line! -
# test_run_mypy in test/test_type_hints.py uses this string)
[mypy] [mypy]
python_version = 2.7 warn_unused_configs = True
warn_redundant_casts = True
show_error_codes = True
# Note: test/ still has syntax errors so it can't be added
files =
torch,
caffe2,
aten/src/ATen/function_wrapper.py
# Minimum version supported - variable annotations were introduced
# in Python 3.6
python_version = 3.6
#
# Third party dependencies that don't have types.
#
[mypy-tensorflow.*]
ignore_missing_imports = True
[mypy-tensorboard.*]
ignore_missing_imports = True
[mypy-onnx.*]
ignore_missing_imports = True
[mypy-matplotlib.*]
ignore_missing_imports = True
[mypy-numpy.*]
ignore_missing_imports = True
[mypy-hypothesis.*]
ignore_missing_imports = True
[mypy-tqdm.*]
ignore_missing_imports = True
[mypy-multiprocessing.*]
ignore_missing_imports = True
[mypy-setuptools.*]
ignore_missing_imports = True
[mypy-nvd3.*]
ignore_missing_imports = True
[mypy-future.utils]
ignore_missing_imports = True
[mypy-past.builtins]
ignore_missing_imports = True
[mypy-numba.*]
ignore_missing_imports = True
[mypy-PIL.*]
ignore_missing_imports = True
[mypy-moviepy.*]
ignore_missing_imports = True
[mypy-cv2.*]
ignore_missing_imports = True
[mypy-torchvision.*]
ignore_missing_imports = True
[mypy-pycuda.*]
ignore_missing_imports = True
[mypy-tensorrt.*]
ignore_missing_imports = True
[mypy-tornado.*]
ignore_missing_imports = True
[mypy-pydot.*]
ignore_missing_imports = True
[mypy-networkx.*]
ignore_missing_imports = True
[mypy-scipy.*]
ignore_missing_imports = True
[mypy-IPython.*]
ignore_missing_imports = True
[mypy-google.protobuf.text_format]
ignore_missing_imports = True
[mypy-lmdb.*]
ignore_missing_imports = True
[mypy-mpi4py.*]
ignore_missing_imports = True
[mypy-skimage.*]
ignore_missing_imports = True
#
# Extension modules without stubs.
#
[mypy-torch._C]
ignore_missing_imports = True
[mypy-torch._C._jit_tree_views]
ignore_missing_imports = True
[mypy-torch.for_onnx.onnx]
ignore_missing_imports = True
#
# Files with various errors. Mostly real errors, possibly some false
# positives as well.
#
[mypy-torch.distributed.*]
ignore_errors = True
[mypy-torch.functional.*]
ignore_errors = True
[mypy-torch.testing._internal.*]
ignore_errors = True
[mypy-torch.quantization.default_mappings]
ignore_errors = True
[mypy-torch.quantization.observer]
ignore_errors = True
[mypy-torch.quantization.fake_quantize]
ignore_errors = True
[mypy-torch.quantization._quantize_script]
ignore_errors = True
[mypy-torch.distributions.*]
ignore_errors = True
[mypy-torch.jit]
ignore_errors = True
[mypy-torch.jit.quantized]
ignore_errors = True
[mypy-torch._jit_internal]
ignore_errors = True
[mypy-torch.jit._builtins]
ignore_errors = True
[mypy-torch.jit._logging]
ignore_errors = True
[mypy-torch.jit.annotations]
ignore_errors = True
[mypy-torch.sparse]
ignore_errors = True
[mypy-torch.tensor]
ignore_errors = True
[mypy-torch._tensor_str]
ignore_errors = True
[mypy-torch.nn.quantized.functional]
ignore_errors = True
[mypy-torch.nn.quantized.modules.activation]
ignore_errors = True
[mypy-torch.nn.qat.modules.activations]
ignore_errors = True
[mypy-torch.nn.quantized.dynamic.modules.rnn]
ignore_errors = True
[mypy-torch.nn.quantized.modules.conv]
ignore_errors = True
[mypy-torch.cuda.*]
ignore_errors = True
[mypy-torch._lobpcg]
ignore_errors = True
[mypy-torch.storage]
ignore_errors = True
[mypy-torch.utils.bundled_inputs]
ignore_errors = True
[mypy-torch.utils.data]
ignore_errors = True
[mypy-torch.utils.data.dataset]
ignore_errors = True
[mypy-torch.utils.data.distributed]
ignore_errors = True
[mypy-torch.nn.utils.prune]
ignore_errors = True
[mypy-torch.nn.cpp]
ignore_errors = True
[mypy-torch.nn.functional]
ignore_errors = True
[mypy-torch.utils.show_pickle]
ignore_errors = True
[mypy-torch.utils.hipify.hipify_python]
ignore_errors = True
[mypy-torch.autograd.function]
ignore_errors = True
[mypy-torch.autograd.variable]
ignore_errors = True
[mypy-torch.serialization]
ignore_errors = True
[mypy-torch.nn.quantized.modules.linear]
ignore_errors = True
[mypy-torch.nn.intrinsic.quantized.modules.conv_relu]
ignore_errors = True
[mypy-torch.nn.intrinsic.qat.modules.conv_fused]
ignore_errors = True
[mypy-torch.onnx.symbolic_opset8]
ignore_errors = True
[mypy-torch.onnx.symbolic_helper]
ignore_errors = True
[mypy-torch.multiprocessing]
ignore_errors = True
[mypy-torch.multiprocessing.spawn]
ignore_errors = True
[mypy-torch.backends.cuda]
ignore_errors = True
[mypy-torch.backends.cudnn]
ignore_errors = True
[mypy-torch.backends.quantized]
ignore_errors = True
[mypy-caffe2.python.*]
ignore_errors = True
[mypy-caffe2.proto.*]
ignore_errors = True
[mypy-caffe2.contrib.gloo.gloo_test]
ignore_errors = True
[mypy-caffe2.quantization.server.pool_dnnlowp_op_test]
ignore_errors = True

View File

@ -377,23 +377,25 @@ class TestClassType(JitTestCase):
def getVal(self): def getVal(self):
return self.x return self.x
def test(li, reverse=False): # Disabled test because JIT doesn't like the type annotation,
# type: (List[Foo], bool) # see gh-36902
li_sorted = sorted(li) # def test(li, reverse=False):
ret_sorted = torch.jit.annotate(List[int], []) # # type: (List[Foo], bool) -> (List[int], List[int])
for foo in li_sorted: # li_sorted = sorted(li)
ret_sorted.append(foo.getVal()) # ret_sorted = torch.jit.annotate(List[int], [])
# for foo in li_sorted:
li.sort(reverse=reverse) # ret_sorted.append(foo.getVal())
ret_sort = torch.jit.annotate(List[int], []) #
for foo in li: # li.sort(reverse=reverse)
ret_sort.append(foo.getVal()) # ret_sort = torch.jit.annotate(List[int], [])
return ret_sorted, ret_sort # for foo in li:
# ret_sort.append(foo.getVal())
self.checkScript(test, ([Foo(2), Foo(1), Foo(3)],)) # return ret_sorted, ret_sort
self.checkScript(test, ([Foo(2), Foo(1), Foo(3)], True)) #
self.checkScript(test, ([Foo(2)],)) # self.checkScript(test, ([Foo(2), Foo(1), Foo(3)],))
self.checkScript(test, ([],)) # self.checkScript(test, ([Foo(2), Foo(1), Foo(3)], True))
# self.checkScript(test, ([Foo(2)],))
# self.checkScript(test, ([],))
@torch.jit.script @torch.jit.script
def test_list_no_reverse(): def test_list_no_reverse():
@ -417,6 +419,7 @@ class TestClassType(JitTestCase):
li = [Foo(1)] li = [Foo(1)]
li.sort(li) li.sort(li)
return li return li
test()
with self.assertRaisesRegex(RuntimeError, "must define a __lt__"): with self.assertRaisesRegex(RuntimeError, "must define a __lt__"):
@torch.jit.script @torch.jit.script

View File

@ -450,14 +450,14 @@ class TestList(JitTestCase):
self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs)) self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs))
def foo(names, results): def foo(names, results):
# type: (List[int], List[int]) # type: (List[int], List[int]) -> List[Tuple[int, int]]
return [(k + 5, v - 2) for k, v in zip(names, results)] return [(k + 5, v - 2) for k, v in zip(names, results)]
test_func(foo, ([1, 2, 4], [4, 7, 9])) test_func(foo, ([1, 2, 4], [4, 7, 9]))
test_func(foo, ([5], [4, 7, 9])) test_func(foo, ([5], [4, 7, 9]))
def fn(x): def fn(x):
# type: (int) # type: (int) -> List[int]
return [i for i in range(x)] # noqa: C416 return [i for i in range(x)] # noqa: C416
test_func(fn, (9,)) test_func(fn, (9,))

View File

@ -163,6 +163,7 @@ class TestTypeHints(TestCase):
'-mmypy', '-mmypy',
'--follow-imports', 'silent', '--follow-imports', 'silent',
'--check-untyped-defs', '--check-untyped-defs',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
os.path.abspath(fn)], os.path.abspath(fn)],
cwd=tmp_dir, cwd=tmp_dir,
check=True) check=True)
@ -179,17 +180,49 @@ class TestTypeHints(TestCase):
examples_folder = os.path.join(test_path, "type_hint_tests") examples_folder = os.path.join(test_path, "type_hint_tests")
examples = os.listdir(examples_folder) examples = os.listdir(examples_folder)
for example in examples: for example in examples:
try: try:
example_path = os.path.join(examples_folder, example) example_path = os.path.join(examples_folder, example)
subprocess.run([ subprocess.run([
sys.executable, sys.executable,
'-mmypy', '-mmypy',
'--follow-imports', 'silent', '--follow-imports', 'silent',
'--check-untyped-defs', '--check-untyped-defs',
example_path], example_path],
check=True) check=True)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
raise AssertionError("mypy failed for example {}. Look above this error for mypy's output.".format(example)) raise AssertionError("mypy failed for example {}. Look above this error for mypy's output.".format(example))
@unittest.skipIf(sys.version_info[0] == 2, "no type hints for Python 2")
@unittest.skipIf(not HAVE_MYPY, "need mypy")
def test_run_mypy(self):
"""
Runs mypy over all files specified in mypy.ini
Note that mypy.ini is not shipped in an installed version of PyTorch,
so this test will only run mypy in a development setup or in CI.
"""
def is_torch_mypyini(path_to_file):
with open(path_to_file, 'r') as f:
first_line = f.readline()
if first_line.startswith('# This is the PyTorch MyPy config file'):
return True
return False
test_dir = os.path.dirname(os.path.realpath(__file__))
repo_rootdir = os.path.join(test_dir, '..')
mypy_inifile = os.path.join(repo_rootdir, 'mypy.ini')
if not (os.path.exists(mypy_inifile) and is_torch_mypyini(mypy_inifile)):
self.skipTest(True)
cwd = os.getcwd()
try:
os.chdir(repo_rootdir)
subprocess.run([sys.executable, '-mmypy'], check=True)
except subprocess.CalledProcessError as e:
raise AssertionError("mypy failed. Look above this error for mypy's output.")
finally:
os.chdir(cwd)
if __name__ == '__main__': if __name__ == '__main__':
run_tests() run_tests()

View File

@ -2,7 +2,10 @@
""" """
from typing import Optional, Tuple
import torch import torch
from torch import Tensor
def is_sparse(A): def is_sparse(A):

View File

@ -3,7 +3,10 @@
# Author: Pearu Peterson # Author: Pearu Peterson
# Created: February 2020 # Created: February 2020
from typing import Dict, Tuple, Optional
import torch import torch
from torch import Tensor
from . import _linalg_utils as _utils from . import _linalg_utils as _utils
from ._overrides import has_torch_function, handle_torch_function from ._overrides import has_torch_function, handle_torch_function

View File

@ -3,8 +3,11 @@
__all__ = ['svd_lowrank', 'pca_lowrank'] __all__ = ['svd_lowrank', 'pca_lowrank']
from typing import Tuple, Optional
import torch import torch
from . import _linalg_utils as _utils from torch import Tensor
from . import _linalg_utils as _utils
from ._overrides import has_torch_function, handle_torch_function from ._overrides import has_torch_function, handle_torch_function

View File

@ -10,7 +10,7 @@ try:
except ImportError: except ImportError:
import functools import functools
class ContextDecorator(object): class ContextDecorator(object): # type: ignore[no-redef]
def __call__(self, func): def __call__(self, func):
@functools.wraps(func) @functools.wraps(func)
def wrapped(*args, **kwargs): def wrapped(*args, **kwargs):

View File

@ -1,3 +1,5 @@
from typing import Tuple, Optional
import torch import torch
import torch.nn.functional as F import torch.nn.functional as F
from ._lowrank import svd_lowrank, pca_lowrank from ._lowrank import svd_lowrank, pca_lowrank

View File

@ -20,7 +20,7 @@ except ImportError:
from tqdm import tqdm from tqdm import tqdm
except ImportError: except ImportError:
# fake tqdm if it's not installed # fake tqdm if it's not installed
class tqdm(object): class tqdm(object): # type: ignore
def __init__(self, total=None, disable=False, def __init__(self, total=None, disable=False,
unit=None, unit_scale=None, unit_divisor=None): unit=None, unit_scale=None, unit_divisor=None):

View File

@ -1,4 +1,5 @@
import warnings import warnings
from typing import Optional
# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h

View File

@ -3633,7 +3633,7 @@ def normalize(input, p=2, dim=1, eps=1e-12, out=None):
def assert_int_or_pair(arg, arg_name, message): def assert_int_or_pair(arg, arg_name, message):
# type: (List[int], str, str) # type: (List[int], str, str) -> None
assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name) assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)

View File

@ -4,6 +4,8 @@ import math
import warnings import warnings
import torch import torch
from torch import Tensor
# These no_grad_* functions are necessary as wrappers around the parts of these # These no_grad_* functions are necessary as wrappers around the parts of these
# functions that use `with torch.no_grad()`. The JIT doesn't support context # functions that use `with torch.no_grad()`. The JIT doesn't support context
@ -363,7 +365,7 @@ def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
Args: Args:
tensor: an n-dimensional `torch.Tensor` tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``) used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the preserves the magnitude of the variance of the weights in the
@ -398,7 +400,7 @@ def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
Args: Args:
tensor: an n-dimensional `torch.Tensor` tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``) used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the preserves the magnitude of the variance of the weights in the

View File

@ -1,5 +1,8 @@
import warnings import warnings
from typing import Tuple, Optional
import torch import torch
from torch import Tensor
from . import Linear from . import Linear
from torch.nn.init import xavier_uniform_ from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_ from torch.nn.init import constant_

View File

@ -1,7 +1,9 @@
# coding=utf-8 # coding=utf-8
import math import math
import warnings import warnings
import torch import torch
from torch import Tensor
from torch.nn.parameter import Parameter from torch.nn.parameter import Parameter
from .. import functional as F from .. import functional as F
from .. import init from .. import init

View File

@ -3,6 +3,9 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from __future__ import unicode_literals from __future__ import unicode_literals
from typing import List, Optional
from torch import Tensor
from .module import Module from .module import Module
from .utils import _single, _pair, _triple from .utils import _single, _pair, _triple
from .. import functional as F from .. import functional as F

View File

@ -1,8 +1,10 @@
import math import math
import torch
import warnings import warnings
import numbers import numbers
from typing import Tuple, Optional, overload
import torch
from torch import Tensor
from .module import Module from .module import Module
from ..parameter import Parameter from ..parameter import Parameter
from ..utils.rnn import PackedSequence from ..utils.rnn import PackedSequence
@ -530,11 +532,13 @@ class LSTM(RNNBase):
return hx return hx
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation) return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
@overload
@torch._jit_internal._overload_method # noqa: F811 @torch._jit_internal._overload_method # noqa: F811
def forward(self, input, hx=None): # noqa: F811 def forward(self, input, hx=None): # noqa: F811
# type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
pass pass
@overload
@torch._jit_internal._overload_method # noqa: F811 @torch._jit_internal._overload_method # noqa: F811
def forward(self, input, hx=None): # noqa: F811 def forward(self, input, hx=None): # noqa: F811
# type: (PackedSequence, Optional[Tuple[Tensor, Tensor]]) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]] # noqa # type: (PackedSequence, Optional[Tuple[Tensor, Tensor]]) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]] # noqa
@ -688,11 +692,13 @@ class GRU(RNNBase):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs) super(GRU, self).__init__('GRU', *args, **kwargs)
@overload
@torch._jit_internal._overload_method # noqa: F811 @torch._jit_internal._overload_method # noqa: F811
def forward(self, input, hx=None): # noqa: F811 def forward(self, input, hx=None): # noqa: F811
# type: (Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor] # type: (Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor]
pass pass
@overload
@torch._jit_internal._overload_method # noqa: F811 @torch._jit_internal._overload_method # noqa: F811
def forward(self, input, hx=None): # noqa: F811 def forward(self, input, hx=None): # noqa: F811
# type: (PackedSequence, Optional[Tensor]) -> Tuple[PackedSequence, Tensor] # type: (PackedSequence, Optional[Tensor]) -> Tuple[PackedSequence, Tensor]

View File

@ -1,4 +1,7 @@
from typing import Optional
import torch import torch
from torch import Tensor
from torch.nn.parameter import Parameter from torch.nn.parameter import Parameter
from .module import Module from .module import Module

View File

@ -1,5 +1,8 @@
import torch
import copy import copy
from typing import Optional
import torch
from torch import Tensor
from .. import functional as F from .. import functional as F
from .module import Module from .module import Module
from .activation import MultiheadAttention from .activation import MultiheadAttention

View File

@ -1,3 +1,5 @@
from typing import List
from torch._six import container_abcs from torch._six import container_abcs
from itertools import repeat from itertools import repeat

View File

@ -4,8 +4,10 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from __future__ import unicode_literals from __future__ import unicode_literals
from typing import List, Optional
import torch import torch
from torch._jit_internal import List as _List from torch import Tensor
from torch.nn.modules.utils import _pair, _triple from torch.nn.modules.utils import _pair, _triple
# Although some of the functions and docstrings are mirrored from the torch.nn, # Although some of the functions and docstrings are mirrored from the torch.nn,
@ -301,7 +303,7 @@ def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
if return_indices: if return_indices:
raise NotImplementedError("return_indices is not yet implemented!") raise NotImplementedError("return_indices is not yet implemented!")
if stride is None: if stride is None:
stride = torch.jit.annotate(_List[int], []) stride = torch.jit.annotate(List[int], [])
return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding, return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
dilation, ceil_mode, return_indices) dilation, ceil_mode, return_indices)

View File

@ -6,6 +6,8 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from __future__ import unicode_literals from __future__ import unicode_literals
from typing import Optional, List
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.intrinsic as nni import torch.nn.intrinsic as nni

View File

@ -1,4 +1,7 @@
from typing import List
import torch import torch
from torch import Tensor
from torch._ops import ops from torch._ops import ops

View File

@ -1,5 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals from __future__ import absolute_import, division, print_function, unicode_literals
from typing import List, Optional
import torch import torch
from .qconfig import QConfig from .qconfig import QConfig
from torch.jit._recursive import wrap_cpp_module from torch.jit._recursive import wrap_cpp_module
@ -85,7 +87,7 @@ def _prepare_script(model, qconfig_dict, is_dynamic):
'forward', 'forward',
scripted_qconfig_dict, scripted_qconfig_dict,
False, False,
is_dynamic)) is_dynamic))
def prepare_script(model, qconfig_dict, inplace=False): def prepare_script(model, qconfig_dict, inplace=False):
if not inplace: if not inplace:

View File

@ -4,10 +4,10 @@ import math
import warnings import warnings
from abc import ABCMeta, abstractmethod from abc import ABCMeta, abstractmethod
from functools import partial from functools import partial
from typing import List, Tuple, Optional
import torch import torch
import torch.nn as nn import torch.nn as nn
from torch._jit_internal import List, Optional
def _with_args(cls_or_self, **kwargs): def _with_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories. r"""Wrapper that allows creation of class factories.

View File

@ -1,5 +1,8 @@
# The Tensor classes are added to this module by python_tensor.cpp # The Tensor classes are added to this module by python_tensor.cpp
from typing import Optional, Tuple
import torch import torch
from torch import Tensor
__all__ = [ __all__ = [
'addmm', 'addmm',

View File

@ -1,4 +1,4 @@
from typing import Tuple from typing import Tuple, Dict
import torch import torch
import torch.distributed.autograd as dist_autograd import torch.distributed.autograd as dist_autograd

View File

@ -145,7 +145,7 @@ class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
@torch.jit.script @torch.jit.script
class MyScriptClass: class MyScriptClass:
def __init__(self, a): def __init__(self, a):
# type: (int) # type: (int) -> None
self.a = a self.a = a
def get_value(self): def get_value(self):

View File

@ -44,7 +44,7 @@ if IS_WINDOWS:
self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0 self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
return not self.manager_dead return not self.manager_dead
else: else:
class ManagerWatchdog(object): class ManagerWatchdog(object): # type: ignore[no-redef]
def __init__(self): def __init__(self):
self.manager_pid = os.getppid() self.manager_pid = os.getppid()
self.manager_dead = False self.manager_dead = False