Summary:
This PR fixes a couple of syntax errors in `torch/` that prevent MyPy from running, fixes simple type annotation errors (e.g. missing `from typing import List, Tuple, Optional`), and adds granular ignores for errors in particular modules as well as for missing typing in third-party packages. As a result, running `mypy` in the root dir of the repo now covers:

- `torch/`
- `aten/src/ATen/function_wrapper.py` (the only file already covered in CI)

In CI this runs on GitHub Actions, in the Lint job, sub-job "quick-checks", task "MyPy typecheck". It should currently report: `Success: no issues found in 329 source files`.

Here are the details of the original 855 errors when running `mypy torch` on current master (after fixing the couple of syntax errors that prevent `mypy` from running through); an illustrative sketch of the ignore configuration follows the listing.

<details>

```
torch/utils/tensorboard/_proto_graph.py:1: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.node_def_pb2' torch/utils/tensorboard/_proto_graph.py:2: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.attr_value_pb2' torch/utils/tensorboard/_proto_graph.py:3: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.tensor_shape_pb2' torch/utils/backcompat/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C' torch/for_onnx/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch.for_onnx.onnx' torch/cuda/nvtx.py:2: error: Cannot find implementation or library stub for module named 'torch._C' torch/utils/show_pickle.py:59: error: Name 'pickle._Unpickler' is not defined torch/utils/show_pickle.py:113: error: "Type[PrettyPrinter]" has no attribute "_dispatch" torch/utils/tensorboard/_onnx_graph.py:1: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.graph_pb2' torch/utils/tensorboard/_onnx_graph.py:2: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.node_def_pb2' torch/utils/tensorboard/_onnx_graph.py:3: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.versions_pb2' torch/utils/tensorboard/_onnx_graph.py:4: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.attr_value_pb2' torch/utils/tensorboard/_onnx_graph.py:5: error: Cannot find implementation or library stub for module named 'tensorboard.compat.proto.tensor_shape_pb2' torch/utils/tensorboard/_onnx_graph.py:9: error: Cannot find implementation or library stub for module named 'onnx' torch/contrib/_tensorboard_vis.py:10: error: Cannot find implementation or library stub for module named 'tensorflow.core.util' torch/contrib/_tensorboard_vis.py:11: error: Cannot find implementation or library stub for module named 'tensorflow.core.framework' torch/contrib/_tensorboard_vis.py:12: error: Cannot find implementation or library stub for module named 'tensorflow.python.summary.writer.writer' torch/utils/hipify/hipify_python.py:43: error: Need type annotation for 'CAFFE2_TEMPLATE_MAP' (hint: "CAFFE2_TEMPLATE_MAP: Dict[<type>, <type>] = ...") torch/utils/hipify/hipify_python.py:636: error: "object" has no attribute "items" torch/nn/_reduction.py:27: error: Name 'Optional' is not defined torch/nn/_reduction.py:27: note: Did you forget to import it from "typing"?
(Suggestion: "from typing import Optional") torch/nn/_reduction.py:47: error: Name 'Optional' is not defined torch/nn/_reduction.py:47: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/utils/tensorboard/_utils.py:17: error: Skipping analyzing 'matplotlib.pyplot': found module but no type hints or library stubs torch/utils/tensorboard/_utils.py:17: error: Skipping analyzing 'matplotlib': found module but no type hints or library stubs torch/utils/tensorboard/_utils.py:18: error: Skipping analyzing 'matplotlib.backends.backend_agg': found module but no type hints or library stubs torch/utils/tensorboard/_utils.py:18: error: Skipping analyzing 'matplotlib.backends': found module but no type hints or library stubs torch/nn/modules/utils.py:27: error: Name 'List' is not defined torch/nn/modules/utils.py:27: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List") caffe2/proto/caffe2_pb2.py:17: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"? caffe2/proto/caffe2_pb2.py:25: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/caffe2_pb2.py:31: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:35: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:39: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:43: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:47: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:51: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:55: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:59: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:63: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:67: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:71: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:75: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/caffe2_pb2.py:108: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:112: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:124: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/caffe2_pb2.py:130: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:134: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:138: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:142: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:146: error: Unexpected keyword argument "serialized_options" for 
"EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:150: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:154: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:158: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:162: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:166: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:170: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:174: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:178: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:182: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:194: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/caffe2_pb2.py:200: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:204: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:208: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:212: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:224: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/caffe2_pb2.py:230: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:234: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:238: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:242: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:246: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:250: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:254: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/caffe2_pb2.py:267: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:274: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:281: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:288: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:295: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:302: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:327: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:334: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:341: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:364: error: Unexpected 
keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:371: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:378: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:385: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:392: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:399: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:406: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:413: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:420: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:427: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:434: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:441: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:448: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:455: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:462: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:488: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:495: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:502: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:509: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:516: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:523: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:530: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:537: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:544: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:551: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:558: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:565: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:572: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:596: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:603: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:627: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:634: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:641: error: Unexpected keyword argument "serialized_options" for 
"FieldDescriptor" caffe2/proto/caffe2_pb2.py:648: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:655: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:662: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:686: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:693: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:717: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:724: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:731: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:738: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:763: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:770: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:777: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:784: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:808: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:815: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:822: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:829: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:836: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:843: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:850: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:857: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:864: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:871: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:878: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:885: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:892: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:916: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:923: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:930: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:937: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:944: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:951: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:958: error: 
Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:982: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:989: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:996: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1003: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1010: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1017: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1024: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1031: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1038: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1045: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1052: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1059: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1066: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1090: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1097: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1104: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1128: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1135: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1142: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1166: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1173: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1180: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1187: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1194: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1218: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1225: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1232: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1239: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1246: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1253: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1260: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1267: error: Unexpected keyword argument 
"serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1274: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1281: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1305: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1312: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1319: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1326: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1333: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1340: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1347: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1354: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1361: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1368: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1375: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1382: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1389: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1396: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1420: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1427: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1434: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1441: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1465: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1472: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1479: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1486: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1493: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1500: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1507: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1514: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1538: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/caffe2_pb2.py:1545: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1552: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1559: error: Unexpected keyword argument "serialized_options" 
for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1566: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/caffe2_pb2.py:1667: error: "GeneratedProtocolMessageType" has no attribute "Segment" torch/multiprocessing/queue.py:4: error: No library stub file for standard library module 'multiprocessing.reduction' caffe2/proto/torch_pb2.py:18: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"? caffe2/proto/torch_pb2.py:27: error: Unexpected keyword argument "serialized_options" for "EnumDescriptor" caffe2/proto/torch_pb2.py:33: error: Unexpected keyword argument "serialized_options" for "EnumValueDescriptor" caffe2/proto/torch_pb2.py:50: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:57: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:81: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:88: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:95: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:109: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:116: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:123: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:130: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:137: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:144: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:151: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:175: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:182: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:189: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:196: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:220: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:227: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:234: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:241: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:265: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:272: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:279: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:286: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:293: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:300: error: 
Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:307: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:314: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:321: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:328: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:335: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:342: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:366: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:373: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:397: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/torch_pb2.py:404: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:411: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:418: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:425: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/torch_pb2.py:432: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:17: error: Unexpected keyword argument "serialized_options" for "FileDescriptor"; did you mean "serialized_pb"? caffe2/proto/metanet_pb2.py:29: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:36: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:43: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:50: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:57: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:64: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:88: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:95: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:102: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:126: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:133: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:140: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:164: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:171: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:178: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:202: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:209: error: Unexpected keyword argument 
"serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:216: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:240: error: Unexpected keyword argument "serialized_options" for "Descriptor" caffe2/proto/metanet_pb2.py:247: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:254: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:261: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:268: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:275: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:282: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:289: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/metanet_pb2.py:296: error: Unexpected keyword argument "serialized_options" for "FieldDescriptor" caffe2/proto/__init__.py:13: error: Skipping analyzing 'caffe2.caffe2.fb.session.proto': found module but no type hints or library stubs torch/multiprocessing/pool.py:3: error: No library stub file for standard library module 'multiprocessing.util' torch/multiprocessing/pool.py:3: note: (Stub files are from https://github.com/python/typeshed) caffe2/python/scope.py:10: error: Skipping analyzing 'past.builtins': found module but no type hints or library stubs caffe2/python/__init__.py:7: error: Module has no attribute "CPU" caffe2/python/__init__.py:8: error: Module has no attribute "CUDA" caffe2/python/__init__.py:9: error: Module has no attribute "MKLDNN" caffe2/python/__init__.py:10: error: Module has no attribute "OPENGL" caffe2/python/__init__.py:11: error: Module has no attribute "OPENCL" caffe2/python/__init__.py:12: error: Module has no attribute "IDEEP" caffe2/python/__init__.py:13: error: Module has no attribute "HIP" caffe2/python/__init__.py:14: error: Module has no attribute "COMPILE_TIME_MAX_DEVICE_TYPES"; maybe "PROTO_COMPILE_TIME_MAX_DEVICE_TYPES"? caffe2/python/__init__.py:15: error: Module has no attribute "ONLY_FOR_TEST"; maybe "PROTO_ONLY_FOR_TEST"? 
caffe2/python/__init__.py:34: error: Item "_Loader" of "Optional[_Loader]" has no attribute "exec_module" caffe2/python/__init__.py:34: error: Item "None" of "Optional[_Loader]" has no attribute "exec_module" caffe2/python/__init__.py:35: error: Module has no attribute "cuda" caffe2/python/__init__.py:37: error: Module has no attribute "cuda" caffe2/python/__init__.py:49: error: Module has no attribute "add_dll_directory" torch/random.py:4: error: Cannot find implementation or library stub for module named 'torch._C' torch/_classes.py:2: error: Cannot find implementation or library stub for module named 'torch._C' torch/onnx/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C' torch/hub.py:21: error: Skipping analyzing 'tqdm.auto': found module but no type hints or library stubs torch/hub.py:24: error: Skipping analyzing 'tqdm': found module but no type hints or library stubs torch/hub.py:27: error: Name 'tqdm' already defined (possibly by an import) torch/_tensor_str.py:164: error: Not all arguments converted during string formatting torch/_ops.py:1: error: Cannot find implementation or library stub for module named 'torch._C' torch/_linalg_utils.py:26: error: Name 'Optional' is not defined torch/_linalg_utils.py:26: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_linalg_utils.py:26: error: Name 'Tensor' is not defined torch/_linalg_utils.py:63: error: Name 'Tensor' is not defined torch/_linalg_utils.py:63: error: Name 'Optional' is not defined torch/_linalg_utils.py:63: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_linalg_utils.py:70: error: Name 'Optional' is not defined torch/_linalg_utils.py:70: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_linalg_utils.py:70: error: Name 'Tensor' is not defined torch/_linalg_utils.py:88: error: Name 'Tensor' is not defined torch/_linalg_utils.py:88: error: Name 'Optional' is not defined torch/_linalg_utils.py:88: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_linalg_utils.py:88: error: Name 'Tuple' is not defined torch/_linalg_utils.py:88: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/_jit_internal.py:17: error: Need type annotation for 'boolean_dispatched' torch/_jit_internal.py:474: error: Need type annotation for '_overloaded_fns' (hint: "_overloaded_fns: Dict[<type>, <type>] = ...") torch/_jit_internal.py:512: error: Need type annotation for '_overloaded_methods' (hint: "_overloaded_methods: Dict[<type>, <type>] = ...") torch/_jit_internal.py:648: error: Incompatible types in assignment (expression has type "FinalCls", variable has type "_SpecialForm") torch/sparse/__init__.py:11: error: Name 'Tensor' is not defined torch/sparse/__init__.py:71: error: Name 'Tensor' is not defined torch/sparse/__init__.py:71: error: Name 'Optional' is not defined torch/sparse/__init__.py:71: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/sparse/__init__.py:71: error: Name 'Tuple' is not defined torch/sparse/__init__.py:71: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import Tuple") torch/nn/init.py:109: error: Name 'Tensor' is not defined torch/nn/init.py:126: error: Name 'Tensor' is not defined torch/nn/init.py:142: error: Name 'Tensor' is not defined torch/nn/init.py:165: error: Name 'Tensor' is not defined torch/nn/init.py:180: error: Name 'Tensor' is not defined torch/nn/init.py:194: error: Name 'Tensor' is not defined torch/nn/init.py:287: error: Name 'Tensor' is not defined torch/nn/init.py:315: error: Name 'Tensor' is not defined torch/multiprocessing/reductions.py:8: error: No library stub file for standard library module 'multiprocessing.util' torch/multiprocessing/reductions.py:9: error: No library stub file for standard library module 'multiprocessing.reduction' torch/multiprocessing/reductions.py:17: error: No library stub file for standard library module 'multiprocessing.resource_sharer' torch/jit/_builtins.py:72: error: Module has no attribute "_no_grad_embedding_renorm_" torch/jit/_builtins.py:80: error: Module has no attribute "stft" torch/jit/_builtins.py:81: error: Module has no attribute "cdist" torch/jit/_builtins.py:82: error: Module has no attribute "norm" torch/jit/_builtins.py:83: error: Module has no attribute "nuclear_norm" torch/jit/_builtins.py:84: error: Module has no attribute "frobenius_norm" torch/backends/cudnn/__init__.py:8: error: Cannot find implementation or library stub for module named 'torch._C' torch/backends/cudnn/__init__.py:86: error: Need type annotation for '_handles' (hint: "_handles: Dict[<type>, <type>] = ...") torch/autograd/profiler.py:13: error: Name 'ContextDecorator' already defined (possibly by an import) torch/autograd/function.py:2: error: Cannot find implementation or library stub for module named 'torch._C' torch/autograd/function.py:2: note: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports torch/autograd/function.py:109: error: Unsupported dynamic base class "with_metaclass" torch/serialization.py:609: error: "Callable[[Any], Any]" has no attribute "cache" torch/_lowrank.py:11: error: Name 'Tensor' is not defined torch/_lowrank.py:13: error: Name 'Optional' is not defined torch/_lowrank.py:13: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_lowrank.py:14: error: Name 'Optional' is not defined torch/_lowrank.py:14: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_lowrank.py:14: error: Name 'Tensor' is not defined torch/_lowrank.py:82: error: Name 'Tensor' is not defined torch/_lowrank.py:82: error: Name 'Optional' is not defined torch/_lowrank.py:82: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_lowrank.py:82: error: Name 'Tuple' is not defined torch/_lowrank.py:82: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/_lowrank.py:130: error: Name 'Tensor' is not defined torch/_lowrank.py:130: error: Name 'Optional' is not defined torch/_lowrank.py:130: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/_lowrank.py:130: error: Name 'Tuple' is not defined torch/_lowrank.py:130: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/_lowrank.py:167: error: Name 'Tensor' is not defined torch/_lowrank.py:167: error: Name 'Optional' is not defined torch/_lowrank.py:167: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import Optional") torch/_lowrank.py:167: error: Name 'Tuple' is not defined torch/_lowrank.py:167: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/quantization/observer.py:45: error: Variable "torch.quantization.observer.ABC" is not valid as a type torch/quantization/observer.py:45: note: See https://mypy.readthedocs.io/en/latest/common_issues.html#variables-vs-type-aliases torch/quantization/observer.py:45: error: Invalid base class "ABC" torch/quantization/observer.py:127: error: Name 'Tensor' is not defined torch/quantization/observer.py:127: error: Name 'Tuple' is not defined torch/quantization/observer.py:127: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/quantization/observer.py:172: error: Module has no attribute "per_tensor_symmetric" torch/quantization/observer.py:172: error: Module has no attribute "per_channel_symmetric" torch/quantization/observer.py:192: error: Name 'Tensor' is not defined torch/quantization/observer.py:192: error: Name 'Tuple' is not defined torch/quantization/observer.py:192: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/quantization/observer.py:233: error: Module has no attribute "per_tensor_symmetric" torch/quantization/observer.py:233: error: Module has no attribute "per_channel_symmetric" torch/quantization/observer.py:534: error: Name 'Tensor' is not defined torch/quantization/observer.py:885: error: Name 'Tensor' is not defined torch/quantization/observer.py:885: error: Name 'Tuple' is not defined torch/quantization/observer.py:885: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/quantization/observer.py:894: error: Cannot determine type of 'max_val' torch/quantization/observer.py:894: error: Cannot determine type of 'min_val' torch/quantization/observer.py:899: error: Cannot determine type of 'min_val' torch/quantization/observer.py:902: error: Name 'Tensor' is not defined torch/quantization/observer.py:925: error: Name 'Tensor' is not defined torch/quantization/observer.py:928: error: Cannot determine type of 'min_val' torch/quantization/observer.py:929: error: Cannot determine type of 'max_val' torch/quantization/observer.py:946: error: Argument "min" to "histc" has incompatible type "Tuple[Tensor, Tensor]"; expected "Union[int, float, bool]" torch/quantization/observer.py:946: error: Argument "max" to "histc" has incompatible type "Tuple[Tensor, Tensor]"; expected "Union[int, float, bool]" torch/quantization/observer.py:1056: error: Module has no attribute "per_tensor_symmetric" torch/quantization/observer.py:1058: error: Module has no attribute "per_channel_symmetric" torch/nn/quantized/functional.py:76: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:76: error: Name 'BroadcastingList2' is not defined torch/nn/quantized/functional.py:259: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:259: error: Name 'Optional' is not defined torch/nn/quantized/functional.py:259: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import Optional") torch/nn/quantized/functional.py:289: error: Module has no attribute "ops" torch/nn/quantized/functional.py:290: error: Module has no attribute "ops" torch/nn/quantized/functional.py:308: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:326: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:356: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:371: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:400: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:400: error: Name 'Optional' is not defined torch/nn/quantized/functional.py:400: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/nn/quantized/functional.py:430: error: Name 'Tensor' is not defined torch/nn/quantized/functional.py:448: error: Name 'Tensor' is not defined torch/nn/quantized/modules/linear.py:26: error: Module has no attribute "ops" torch/nn/quantized/modules/linear.py:28: error: Module has no attribute "ops" torch/nn/quantized/modules/functional_modules.py:40: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:47: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:54: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:61: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:68: error: Name 'List' is not defined torch/nn/quantized/modules/functional_modules.py:68: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List") torch/nn/quantized/modules/functional_modules.py:68: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:75: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:140: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:146: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:151: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:157: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:162: error: Name 'List' is not defined torch/nn/quantized/modules/functional_modules.py:162: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List") torch/nn/quantized/modules/functional_modules.py:162: error: Name 'Tensor' is not defined torch/nn/quantized/modules/functional_modules.py:168: error: Name 'Tensor' is not defined torch/multiprocessing/spawn.py:9: error: Module 'torch.multiprocessing' has no attribute '_prctl_pr_set_pdeathsig' torch/multiprocessing/__init__.py:28: error: Module has no attribute "__all__" torch/jit/frontend.py:9: error: Cannot find implementation or library stub for module named 'torch._C._jit_tree_views' torch/jit/annotations.py:6: error: Module 'torch._jit_internal' has no attribute 'BroadcastingList2'; maybe "BroadcastingList1" or "BroadcastingListCls"? torch/jit/annotations.py:6: error: Module 'torch._jit_internal' has no attribute 'BroadcastingList3'; maybe "BroadcastingList1" or "BroadcastingListCls"? 
torch/jit/annotations.py:9: error: Cannot find implementation or library stub for module named 'torch._C' torch/distributions/distribution.py:16: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...") torch/distributions/distribution.py:74: error: Name 'arg_constraints' already defined on line 16 torch/distributions/distribution.py:84: error: Name 'support' already defined on line 15 torch/functional.py:114: error: Name 'Tuple' is not defined torch/functional.py:114: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/functional.py:114: error: Name 'Optional' is not defined torch/functional.py:114: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:189: error: Incompatible types in assignment (expression has type "None", variable has type "Tensor") torch/functional.py:200: error: Argument 1 to "_indices_product" has incompatible type "Tuple[int, ...]"; expected "List[int]" torch/functional.py:204: error: No overload variant of "__setitem__" of "list" matches argument types "Tensor", "int" torch/functional.py:204: note: Possible overload variants: torch/functional.py:204: note: def __setitem__(self, int, int) -> None torch/functional.py:204: note: def __setitem__(self, slice, Iterable[int]) -> None torch/functional.py:204: error: No overload variant of "__getitem__" of "list" matches argument type "Tensor" torch/functional.py:204: note: def __getitem__(self, int) -> int torch/functional.py:204: note: def __getitem__(self, slice) -> List[int] torch/functional.py:207: error: "Tensor" has no attribute "copy_" torch/functional.py:212: error: No overload variant of "__setitem__" of "list" matches argument types "Tensor", "int" torch/functional.py:212: note: Possible overload variants: torch/functional.py:212: note: def __setitem__(self, int, int) -> None torch/functional.py:212: note: def __setitem__(self, slice, Iterable[int]) -> None torch/functional.py:212: error: No overload variant of "__getitem__" of "list" matches argument type "Tensor" torch/functional.py:212: note: def __getitem__(self, int) -> int torch/functional.py:212: note: def __getitem__(self, slice) -> List[int] torch/functional.py:215: error: Incompatible types in assignment (expression has type "None", variable has type "Tensor") torch/functional.py:334: error: Name 'Optional' is not defined torch/functional.py:334: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:429: error: Argument 2 to "pad" has incompatible type "Tuple[int, int]"; expected "List[int]" torch/functional.py:431: error: Module has no attribute "stft" torch/functional.py:766: error: Module has no attribute "cdist" torch/functional.py:768: error: Module has no attribute "cdist" torch/functional.py:770: error: Module has no attribute "cdist" torch/functional.py:775: error: Name 'Optional' is not defined torch/functional.py:775: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:780: error: Name 'Optional' is not defined torch/functional.py:780: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import Optional") torch/functional.py:780: error: Name 'number' is not defined torch/functional.py:780: error: Name 'norm' already defined on line 775 torch/functional.py:785: error: Name 'Optional' is not defined torch/functional.py:785: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:785: error: Name 'number' is not defined torch/functional.py:785: error: Name 'norm' already defined on line 775 torch/functional.py:790: error: Name 'Optional' is not defined torch/functional.py:790: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:790: error: Name 'norm' already defined on line 775 torch/functional.py:795: error: Name 'norm' already defined on line 775 torch/functional.py:960: error: Name 'Any' is not defined torch/functional.py:960: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Any") torch/functional.py:960: error: Name 'Tuple' is not defined torch/functional.py:960: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/functional.py:1036: error: Argument 1 to "len" has incompatible type "int"; expected "Sized" torch/functional.py:1041: error: Name 'Optional' is not defined torch/functional.py:1041: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:1041: error: Name 'Tuple' is not defined torch/functional.py:1041: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Tuple") torch/functional.py:1056: error: Name 'Optional' is not defined torch/functional.py:1056: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/functional.py:1056: error: Name 'Tuple' is not defined torch/functional.py:1056: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import Tuple") torch/distributions/von_mises.py:87: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/negative_binomial.py:25: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None") torch/distributions/multivariate_normal.py:116: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/laplace.py:23: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/independent.py:34: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...") torch/distributions/cauchy.py:28: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/poisson.py:28: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None") torch/distributions/one_hot_categorical.py:32: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None") torch/distributions/normal.py:27: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/lowrank_multivariate_normal.py:79: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/gamma.py:30: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/exponential.py:23: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/fishersnedecor.py:25: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/dirichlet.py:44: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None") torch/nn/quantized/dynamic/modules/rnn.py:230: error: Incompatible types in assignment (expression has type "int", variable has type "Tensor") torch/nn/quantized/dynamic/modules/rnn.py:232: error: Incompatible types in assignment (expression has type "int", variable has type "Tensor") torch/nn/quantized/dynamic/modules/rnn.py:236: error: Incompatible return value type (got "Tuple[Any, Tensor, Any]", expected "Tuple[int, int, int]") torch/nn/quantized/dynamic/modules/rnn.py:351: error: Incompatible types in assignment (expression has type "Type[LSTM]", base class "RNNBase" defined the type as "Type[RNNBase]") torch/nn/quantized/dynamic/modules/rnn.py:381: error: Module has no attribute "quantized_lstm" torch/nn/quantized/dynamic/modules/rnn.py:385: error: Module has no attribute "quantized_lstm" torch/nn/quantized/dynamic/modules/rnn.py:414: error: Argument 1 to "forward_impl" of "LSTM" has incompatible type "PackedSequence"; expected "Tensor" torch/nn/quantized/dynamic/modules/rnn.py:416: error: Incompatible types in assignment (expression has type "PackedSequence", variable has type "Tensor") torch/nn/quantized/dynamic/modules/rnn.py:418: error: Incompatible 
return value type (got "Tuple[Tensor, Tuple[Tensor, Tensor]]", expected "Tuple[PackedSequence, Tuple[Tensor, Tensor]]") torch/nn/quantized/dynamic/modules/rnn.py:420: error: Argument 1 of "permute_hidden" is incompatible with supertype "RNNBase"; supertype defines the argument type as "Tensor" torch/nn/quantized/dynamic/modules/rnn.py:420: error: Return type "Tuple[Tensor, Tensor]" of "permute_hidden" incompatible with return type "Tensor" in supertype "RNNBase" torch/nn/quantized/dynamic/modules/rnn.py:426: error: Argument 2 of "check_forward_args" is incompatible with supertype "RNNBase"; supertype defines the argument type as "Tensor" torch/nn/intrinsic/qat/modules/conv_fused.py:232: error: Incompatible types in assignment (expression has type "Type[ConvBnReLU2d]", base class "ConvBn2d" defined the type as "Type[ConvBn2d]") torch/distributions/beta.py:27: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None") torch/distributions/geometric.py:31: error: Incompatible types in assignment (expression has type "_IntegerGreaterThan", base class "Distribution" defined the type as "None") torch/distributions/continuous_bernoulli.py:38: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None") torch/distributions/bernoulli.py:30: error: Incompatible types in assignment (expression has type "_Boolean", base class "Distribution" defined the type as "None") torch/quantization/fake_quantize.py:126: error: Module has no attribute "per_tensor_symmetric" torch/quantization/fake_quantize.py:132: error: Module has no attribute "per_channel_symmetric" torch/distributions/transformed_distribution.py:41: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...") torch/jit/__init__.py:1: error: Cannot find implementation or library stub for module named 'torch._C' torch/jit/__init__.py:15: error: Module 'torch.utils' has no attribute 'set_module' torch/jit/__init__.py:70: error: Name 'Attribute' already defined on line 68 torch/jit/__init__.py:213: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior torch/jit/__init__.py:215: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior torch/jit/__init__.py:1524: error: Unsupported dynamic base class "with_metaclass" torch/jit/__init__.py:1869: error: Name 'ScriptModule' already defined on line 1524 torch/jit/__init__.py:1998: error: Need type annotation for '_jit_caching_layer' torch/jit/__init__.py:1999: error: Need type annotation for '_jit_function_overload_caching' torch/distributions/relaxed_categorical.py:34: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/relaxed_categorical.py:108: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the type as "None") torch/distributions/relaxed_bernoulli.py:31: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/relaxed_bernoulli.py:114: error: Incompatible types in assignment (expression has type "_Interval", base class "Distribution" defined the type as "None") torch/distributions/logistic_normal.py:31: error: Incompatible types in assignment (expression has type "_Simplex", base class "Distribution" defined the 
type as "None") torch/distributions/log_normal.py:26: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/half_normal.py:27: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/half_cauchy.py:28: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/gumbel.py:28: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/nn/quantized/modules/conv.py:18: error: Module 'torch.nn.utils' has no attribute 'fuse_conv_bn_weights' torch/nn/quantized/modules/conv.py:209: error: Name 'Optional' is not defined torch/nn/quantized/modules/conv.py:209: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/nn/quantized/modules/conv.py:214: error: Module has no attribute "ops" torch/nn/quantized/modules/conv.py:321: error: Name 'Optional' is not defined torch/nn/quantized/modules/conv.py:321: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/nn/quantized/modules/conv.py:323: error: Module has no attribute "ops" torch/nn/quantized/modules/conv.py:447: error: Name 'Optional' is not defined torch/nn/quantized/modules/conv.py:447: note: Did you forget to import it from "typing"? (Suggestion: "from typing import Optional") torch/nn/quantized/modules/conv.py:449: error: Module has no attribute "ops" torch/nn/quantized/modules/conv.py:513: error: Name 'nn.modules.conv._ConvTransposeNd' is not defined torch/nn/quantized/modules/conv.py:525: error: Name 'List' is not defined torch/nn/quantized/modules/conv.py:525: note: Did you forget to import it from "typing"? (Suggestion: "from typing import List") torch/nn/quantized/modules/conv.py:527: error: Name 'List' is not defined torch/nn/quantized/modules/conv.py:527: note: Did you forget to import it from "typing"? 
(Suggestion: "from typing import List") torch/nn/intrinsic/quantized/modules/conv_relu.py:8: error: Module 'torch.nn.utils' has no attribute 'fuse_conv_bn_weights' torch/nn/intrinsic/quantized/modules/conv_relu.py:21: error: Incompatible types in assignment (expression has type "Type[ConvReLU2d]", base class "Conv2d" defined the type as "Type[Conv2d]") torch/nn/intrinsic/quantized/modules/conv_relu.py:62: error: Incompatible types in assignment (expression has type "Type[ConvReLU3d]", base class "Conv3d" defined the type as "Type[Conv3d]") torch/distributions/weibull.py:25: error: Incompatible types in assignment (expression has type "_GreaterThan", base class "Distribution" defined the type as "None") torch/distributions/kl.py:35: error: Need type annotation for '_KL_MEMOIZE' (hint: "_KL_MEMOIZE: Dict[<type>, <type>] = ...") torch/distributions/studentT.py:27: error: Incompatible types in assignment (expression has type "_Real", base class "Distribution" defined the type as "None") torch/distributions/mixture_same_family.py:48: error: Need type annotation for 'arg_constraints' (hint: "arg_constraints: Dict[<type>, <type>] = ...") torch/distributions/__init__.py:158: error: Name 'transforms' is not defined torch/onnx/utils.py:21: error: Cannot find implementation or library stub for module named 'torch._C' torch/distributed/rendezvous.py:4: error: Cannot find implementation or library stub for module named 'urlparse' torch/distributed/rendezvous.py:4: error: Name 'urlparse' already defined (possibly by an import) torch/distributed/rendezvous.py:4: error: Name 'urlunparse' already defined (possibly by an import) torch/distributed/rendezvous.py:9: error: Module 'torch.distributed' has no attribute 'FileStore' torch/distributed/rendezvous.py:9: error: Module 'torch.distributed' has no attribute 'TCPStore' torch/distributed/rendezvous.py:65: error: On Python 3 '{}'.format(b'abc') produces "b'abc'"; use !r if this is a desired behavior torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllreduceOptions'; maybe "ReduceOptions" or "AllreduceCoalescedOptions"? torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllreduceCoalescedOptions'; maybe "AllreduceOptions"? torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'AllToAllOptions' torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'BroadcastOptions' torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'GatherOptions'; maybe "ScatterOptions"? torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ReduceOptions'; maybe "AllreduceOptions", "ReduceScatterOptions", or "ReduceOp"? torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ReduceScatterOptions'; maybe "ScatterOptions" or "ReduceOptions"? torch/distributed/distributed_c10d.py:11: error: Module 'torch.distributed' has no attribute 'ScatterOptions'; maybe "ReduceScatterOptions" or Pull Request resolved: https://github.com/pytorch/pytorch/pull/36584 Reviewed By: seemethere, ailzhang Differential Revision: D21155985 Pulled By: ezyang fbshipit-source-id: f628d4293992576207167e7c417998fad15898d1
r""" Functional interface (quantized)."""
|
|
from __future__ import absolute_import
|
|
from __future__ import division
|
|
from __future__ import print_function
|
|
from __future__ import unicode_literals
|
|
|
|
from typing import List, Optional
|
|
|
|
import torch
|
|
from torch import Tensor
|
|
from torch.nn.modules.utils import _pair, _triple
|
|
|
|
# Although some of the functions and docstrings are mirrored from the torch.nn,
|
|
# we want to have them here for future changes.
|
|
|
|
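# All of the functions below operate on quantized tensors. A minimal sketch of
# how such an input is typically produced (not part of the original file; the
# scale/zero_point values are illustrative assumptions):
#
#     >>> x = torch.randn(1, 4, 8, 8)
#     >>> qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0,
#     ...                                dtype=torch.quint8)

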
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
               count_include_pad=True, divisor_override=None):
    r"""
    Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
    :math:`sH \times sW` steps. The number of output features is equal to the number of
    input planes.

    .. note:: The input quantization parameters propagate to the output.

    See :class:`~torch.nn.quantized.AvgPool2d` for details and output shape.

    Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
        kernel_size: size of the pooling region. Can be a single number or a
            tuple `(kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
            tuple `(sH, sW)`. Default: :attr:`kernel_size`
        padding: implicit zero paddings on both sides of the input. Can be a
            single number or a tuple `(padH, padW)`. Default: 0
        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
            to compute the output shape. Default: ``False``
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation. Default: ``True``
        divisor_override: if specified, it will be used as divisor, otherwise
            size of the pooling region will be used. Default: None
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
    return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
                                          ceil_mode, count_include_pad,
                                          divisor_override)


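# Usage sketch for ``avg_pool2d`` (an assumption-based example, not from the
# original file): pool a quantized NCHW tensor; the output keeps the input's
# scale and zero_point.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8),
#     ...                                scale=0.1, zero_point=0, dtype=torch.quint8)
#     >>> avg_pool2d(qx, kernel_size=2, stride=2)

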
def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
               count_include_pad=True, divisor_override=None):
    r"""
    Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
    :math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
    input planes.

    .. note:: The input quantization parameters propagate to the output.

    Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
        kernel_size: size of the pooling region. Can be a single number or a
            tuple `(kD, kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
            tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
        padding: implicit zero paddings on both sides of the input. Can be a
            single number or a tuple `(padD, padH, padW)`. Default: 0
        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
            to compute the output shape. Default: ``False``
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation. Default: ``True``
        divisor_override: if specified, it will be used as divisor, otherwise
            size of the pooling region will be used. Default: None
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
    return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
                                          ceil_mode, count_include_pad,
                                          divisor_override)


def adaptive_avg_pool2d(input, output_size):
    # type: (Tensor, BroadcastingList2[int]) -> Tensor
    r"""
    Applies a 2D adaptive average pooling over a quantized input signal composed
    of several quantized input planes.

    .. note:: The input quantization parameters propagate to the output.

    See :class:`~torch.nn.quantized.AdaptiveAvgPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
            double-integer tuple)
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.adaptive_avg_pool2d' must be quantized!")
    return torch.nn.functional.adaptive_avg_pool2d(input, output_size)


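# Usage sketch for ``adaptive_avg_pool2d`` (illustrative only): produce a fixed
# 1x1 spatial output regardless of the input's spatial size.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 7, 7),
#     ...                                scale=0.1, zero_point=0, dtype=torch.quint8)
#     >>> adaptive_avg_pool2d(qx, output_size=1)

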
def conv2d(input, weight, bias,
           stride=1, padding=0, dilation=1, groups=1,
           padding_mode='zeros',
           scale=1.0, zero_point=0,
           dtype=torch.quint8):
    r"""
    Applies a 2D convolution over a quantized 2D input composed of several input
    planes.

    See :class:`~torch.nn.quantized.Conv2d` for details and output shape.

    Args:
        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
            tuple `(sH, sW)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
            single number or a tuple `(padH, padW)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
            a tuple `(dH, dW)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
            number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> from torch.nn.quantized import functional as qF
        >>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
        >>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
        >>> bias = torch.randn(8, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """ # noqa: E501
    if padding_mode != 'zeros':
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 4:
        raise ValueError("Input shape must be `(N, C, H, W)`!")
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)

    prepacked_weight = torch.ops.quantized.conv2d_prepack(
        weight, bias, stride, padding, dilation, groups)
    return torch.ops.quantized.conv2d(input,
                                      prepacked_weight,
                                      stride, padding, dilation,
                                      groups, scale, zero_point)


def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
           padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
    r"""
    Applies a 3D convolution over a quantized 3D input composed of several input
    planes.

    See :class:`~torch.nn.quantized.Conv3d` for details and output shape.

    Args:
        input: quantized input tensor of shape
            :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
        weight: quantized filters of shape
            :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
        bias: **non-quantized** bias tensor of shape
            :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
            tuple `(sD, sH, sW)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
            single number or a tuple `(padD, padH, padW)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
            a tuple `(dD, dH, dW)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be
            divisible by the number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for
            quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> from torch.nn.quantized import functional as qF
        >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
        >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
        >>> bias = torch.randn(8, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """ # noqa: E501
    if padding_mode != 'zeros':
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 5:
        raise ValueError("Input shape must be `(N, C, D, H, W)`!")
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)

    prepacked_weight = torch.ops.quantized.conv3d_prepack(
        weight, bias, stride, padding, dilation, groups)
    return torch.ops.quantized.conv3d(
        input, prepacked_weight, stride, padding, dilation, groups, scale,
        zero_point)


def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    r"""Down/up samples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    See :func:`torch.nn.functional.interpolate` for implementation details.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [optional depth] x [optional height] x width`.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D/3D input is supported for quantized inputs

    .. note:: Only the following modes are supported for the quantized inputs:

        - `bilinear`
        - `nearest`

    Args:
        input (Tensor): the input tensor
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str): algorithm used for upsampling:
            ``'nearest'`` | ``'bilinear'``
        align_corners (bool, optional): Geometrically, we consider the pixels of the
            input and output as squares rather than points.
            If set to ``True``, the input and output tensors are aligned by the
            center points of their corner pixels, preserving the values at the corner pixels.
            If set to ``False``, the input and output tensors are aligned by the corner
            points of their corner pixels, and the interpolation uses edge value padding
            for out-of-boundary values, making this operation *independent* of input size
            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
            is ``'bilinear'``.
            Default: ``False``
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.interpolate' must be quantized!")
    return torch.nn.functional.interpolate(input, size, scale_factor, mode,
                                           align_corners)


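# Usage sketch for ``interpolate`` (illustrative only): double the spatial size
# of a quantized image tensor with nearest-neighbor upsampling.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 4, 4),
#     ...                                scale=0.1, zero_point=0, dtype=torch.quint8)
#     >>> interpolate(qx, scale_factor=2, mode='nearest')

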
def linear(input, weight, bias=None, scale=None, zero_point=None):
    # type: (Tensor, Tensor, Optional[Tensor], Optional[float], Optional[int]) -> Tensor
    r"""
    Applies a linear transformation to the incoming quantized data:
    :math:`y = xA^T + b`.
    See :class:`~torch.nn.quantized.Linear`

    .. note::

        The current implementation packs the weights on every call, which has a
        performance penalty. If you want to avoid the overhead, use
        :class:`~torch.nn.quantized.Linear`.

    Args:
        input (Tensor): Quantized input of type `torch.quint8`
        weight (Tensor): Quantized weight of type `torch.qint8`
        bias (Tensor): None or fp32 bias of type `torch.float`
        scale (double): output scale. If None, derived from the input scale
        zero_point (long): output zero point. If None, derived from the input zero_point

    Shape:
        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    if scale is None:
        scale = input.q_scale()
    if zero_point is None:
        zero_point = input.q_zero_point()
    _packed_params = torch.ops.quantized.linear_prepack(weight, bias)
    return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)


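# Usage sketch for ``linear`` (illustrative only): quint8 activations, qint8
# weights, fp32 bias; the output scale/zero_point default to the input's.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(2, 4),
#     ...                                scale=0.1, zero_point=0, dtype=torch.quint8)
#     >>> qw = torch.quantize_per_tensor(torch.randn(3, 4),
#     ...                                scale=0.05, zero_point=0, dtype=torch.qint8)
#     >>> linear(qx, qw, bias=torch.zeros(3))

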
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
               ceil_mode=False, return_indices=False):
    r"""Applies a 2D max pooling over a quantized input signal composed of
    several quantized input planes.

    .. note:: The input quantization parameters are propagated to the output.

    See :class:`~torch.nn.quantized.MaxPool2d` for details.
    """
    if return_indices:
        raise NotImplementedError("return_indices is not yet implemented!")
    if stride is None:
        stride = torch.jit.annotate(List[int], [])
    return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
                                          dilation, ceil_mode, return_indices)


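# Usage sketch for ``max_pool2d`` (illustrative only): 2x2 max pooling on a
# quantized NCHW tensor.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8),
#     ...                                scale=0.1, zero_point=0, dtype=torch.quint8)
#     >>> max_pool2d(qx, kernel_size=2)

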
def relu(input, inplace=False):
    # type: (Tensor, bool) -> Tensor
    r"""relu(input, inplace=False) -> Tensor

    Applies the rectified linear unit function element-wise.
    See :class:`~torch.nn.quantized.ReLU` for more details.

    Args:
        input: quantized input
        inplace: perform the computation inplace
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.relu' must be quantized!")
    if inplace:
        return torch.relu_(input)
    else:
        return torch.relu(input)


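# Usage sketch for ``relu`` (illustrative only): values that dequantize below
# zero are clamped to zero; scale and zero_point carry over from the input.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(4),
#     ...                                scale=0.1, zero_point=64, dtype=torch.quint8)
#     >>> relu(qx)

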
def leaky_relu(input, negative_slope=0.01, inplace=False,
               scale=None, zero_point=None):
    # type: (Tensor, float, bool, Optional[float], Optional[int]) -> Tensor
    r"""
    Quantized version of

    leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor

    Applies element-wise,
    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`

    Args:
        input: Quantized input
        negative_slope: The slope of the negative input
        inplace: Inplace modification of the input tensor
        scale, zero_point: Scale and zero point of the output tensor.

    See :class:`~torch.nn.LeakyReLU` for more details.
    """
    if scale is not None and zero_point is not None:
        assert not inplace, "Cannot rescale with `inplace`"
        output = torch.quantize_per_tensor(torch.zeros(input.shape),
                                           scale, int(zero_point), input.dtype)
        torch._C._nn.leaky_relu(input, negative_slope, out=output)
        return output
    if inplace:
        result = torch._C._nn.leaky_relu_(input, negative_slope)
    else:
        result = torch._C._nn.leaky_relu(input, negative_slope)
    return result


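# Usage sketch for ``leaky_relu`` (illustrative only): passing ``scale`` and
# ``zero_point`` requantizes the output instead of reusing the input's
# quantization parameters.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(4),
#     ...                                scale=0.1, zero_point=64, dtype=torch.quint8)
#     >>> leaky_relu(qx, negative_slope=0.1, scale=0.05, zero_point=32)

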
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
    # type: (Tensor, float, float, bool) -> Tensor
    r"""
    hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor

    Applies the quantized HardTanh function element-wise, with scale and
    zero-point carried over from the input tensor. See :class:`~torch.nn.Hardtanh`
    for more details.
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
    if inplace:
        return torch._C._nn.hardtanh_(input, min_val, max_val)
    return torch._C._nn.hardtanh(input, min_val, max_val)


def hardswish(input, scale, zero_point):
    # type: (Tensor, float, int) -> Tensor
    r"""Applies the quantized version of the hardswish function, element-wise,
    as described in the paper:

    `Searching for MobileNetV3`_.

    .. math::
        \text{Hardswish}(x) = \begin{cases}
            0 & \text{if~} x \le -3, \\
            x & \text{if~} x \ge +3, \\
            x \cdot (x + 3) / 6 & \text{otherwise}
        \end{cases}

    Args:
        input: quantized input
        scale, zero_point: Scale and zero point of the output tensor.

    See :class:`~torch.nn.Hardswish` for more details.

    .. _`Searching for MobileNetV3`:
        https://arxiv.org/abs/1905.02244
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardswish' must be quantized!")
    output = torch.quantize_per_tensor(torch.zeros(input.shape),
                                       scale, int(zero_point), input.dtype)
    torch._C._nn.hardswish(input, out=output)
    return output


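# Usage sketch for ``hardswish`` (illustrative only): unlike most ops in this
# file, the output scale and zero_point must be given explicitly.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(4),
#     ...                                scale=0.1, zero_point=64, dtype=torch.quint8)
#     >>> hardswish(qx, scale=0.1, zero_point=64)

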
def elu(input, alpha=1., inplace=False, scale=None, zero_point=None):
    # type: (Tensor, Optional[float], bool, Optional[float], Optional[int]) -> Tensor
    r"""
    Applies the quantized ELU function element-wise:

    .. math::
        \text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))

    Args:
        input: quantized input
        alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
        inplace: Inplace modification of the input tensor
        scale, zero_point: Scale and zero point of the output tensor.
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.elu' must be quantized!")
    if (scale is not None) != (zero_point is not None):
        raise ValueError("Either both or none of (scale, zero_point) must be specified!")

    if scale is not None and zero_point is not None:
        assert not inplace, "Cannot rescale with `inplace`"
        output = torch.quantize_per_tensor(torch.zeros(input.shape),
                                           scale, int(zero_point), input.dtype)
        torch._C._nn.elu(input, alpha, out=output)
        return output
    elif inplace:
        return torch._C._nn.elu_(input, alpha)
    else:
        return torch._C._nn.elu(input, alpha)


def hardsigmoid(input):
    # type: (Tensor) -> Tensor
    r"""
    Applies the quantized element-wise function

    .. math::
        \text{Hardsigmoid}(x) = \begin{cases}
            0 & \text{if~} x \le -3, \\
            1 & \text{if~} x \ge +3, \\
            x / 6 + 1 / 2 & \text{otherwise}
        \end{cases}

    See :class:`~torch.nn.Hardsigmoid` for more details.
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
    return torch._C._nn.hardsigmoid(input)


def clamp(input, min_, max_):
    # type: (Tensor, float, float) -> Tensor
    r"""clamp(input, min_, max_) -> Tensor

    Applies the clamp function element-wise.
    See :class:`~torch.nn.quantized.clamp` for more details.

    Args:
        input: quantized input
        min_: minimum value for clamping
        max_: maximum value for clamping
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.clamp' must be quantized!")
    return torch.clamp(input, min_, max_)


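# Usage sketch for ``clamp`` (illustrative only): the clamp bounds are given in
# the dequantized (floating-point) domain.
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(4),
#     ...                                scale=0.1, zero_point=64, dtype=torch.quint8)
#     >>> clamp(qx, -0.5, 0.5)

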
def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    r"""Upsamples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to ``nn.quantized.functional.interpolate(...)``.

    See :func:`torch.nn.functional.interpolate` for implementation details.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [optional depth] x [optional height] x width`.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D input is supported for quantized inputs

    .. note:: Only the following modes are supported for the quantized inputs:

        - `bilinear`
        - `nearest`

    Args:
        input (Tensor): quantized input tensor
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to be an integer.
        mode (string): algorithm used for upsampling:
            ``'nearest'`` | ``'bilinear'``
        align_corners (bool, optional): Geometrically, we consider the pixels of the
            input and output as squares rather than points.
            If set to ``True``, the input and output tensors are aligned by the
            center points of their corner pixels, preserving the values at the corner pixels.
            If set to ``False``, the input and output tensors are aligned by the corner
            points of their corner pixels, and the interpolation uses edge value padding
            for out-of-boundary values, making this operation *independent* of input size
            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
            is ``'bilinear'``.
            Default: ``False``

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes
        (`bilinear`) don't proportionally align the
        output and input pixels, and thus the output values can depend on the
        input size. This was the default behavior for these modes up to version
        0.3.1. Since then, the default behavior is ``align_corners = False``.
        See :class:`~torch.nn.Upsample` for concrete examples on how this
        affects the outputs.
    """
    warnings.warn("nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode, align_corners)


def upsample_bilinear(input, size=None, scale_factor=None):
    r"""Upsamples the input, using bilinear upsampling.

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to
        ``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D inputs are supported

    Args:
        input (Tensor): quantized input
        size (int or Tuple[int, int]): output spatial size.
        scale_factor (int or Tuple[int, int]): multiplier for spatial size
    """
    # DeprecationWarning is ignored by default
    warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)


def upsample_nearest(input, size=None, scale_factor=None):
    r"""Upsamples the input, using nearest neighbours' pixel values.

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to ``nn.quantized.functional.interpolate(..., mode='nearest')``.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D inputs are supported

    Args:
        input (Tensor): quantized input
        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
            size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
    """
    # DeprecationWarning is ignored by default
    warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode='nearest')