Fix typos in messages under torch (#89049)
This PR fixes typos in messages in `.py` files under the torch directory. Only in `torch/onnx/symbolic_opset16.py` does it also fix a typo in a comment, to make the operator name correct.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/89049
Approved by: https://github.com/lezcano
parent d1f48f05ce
commit 1cd6ebe095
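The misspellings corrected in the hunks below are the kind a simple wordlist scan can surface. As a minimal illustrative sketch (not part of this PR; the wordlist is drawn from this diff, and running from a repo checkout with a `torch/` directory is assumed), one could flag them like this:

```python
import pathlib
import re

# Misspelling -> correction pairs taken from the hunks in this PR.
FIXES = {
    "recieved": "received",
    "provded": "provided",
    "exisiting": "existing",
    "mistmatch": "mismatch",
    "occured": "occurred",
    "dimentions": "dimensions",
}
PATTERN = re.compile("|".join(FIXES))

# Scan every .py file under torch/ and report each hit with its suggested fix.
for path in pathlib.Path("torch").rglob("*.py"):
    for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
        for match in PATTERN.finditer(line):
            print(f"{path}:{lineno}: {match.group(0)} -> {FIXES[match.group(0)]}")
```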
@@ -595,7 +595,7 @@ def _nll_loss_nd(
 ) -> TensorLikeType:
     utils.check(
         input.ndim > 0 and input.ndim <= 3,
-        lambda: f"Expected input dimension to be either [1, 2, 3] but recieved {input.ndim}.",
+        lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.",
     )

     utils.check(
@@ -35,7 +35,7 @@ class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
                  freeze_bn=False,
                  qconfig=None):
         nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
-        assert qconfig, 'qconfig must be provded for QAT module'
+        assert qconfig, 'qconfig must be provided for QAT module'
         self.qconfig = qconfig
         self.freeze_bn = freeze_bn if self.training else True
         self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)
@@ -385,7 +385,7 @@ class ModelReport:
                     module_fqns_to_features[module_fqn] = {**new_info, **present_info}
                 else:
                     error_str = "You have the same key with different values across detectors. "
-                    error_str += "Someone incorrectly implemented a detector with conflicting keys to exisiting detectors."
+                    error_str += "Someone incorrectly implemented a detector with conflicting keys to existing detectors."
                     raise ValueError(error_str)
             else:
                 # we just set it
@@ -1019,7 +1019,7 @@ class HistogramObserver(UniformQuantizationObserverBase):
         This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
         caffe2/quantization/server/norm_minimization.cc
         """
-        assert self.histogram.size()[0] == self.bins, "bins mistmatch"
+        assert self.histogram.size()[0] == self.bins, "bins mismatch"
         bin_width = (self.max_val - self.min_val) / self.bins

         # cumulative sum
@@ -598,7 +598,7 @@ def create_args(parser=None):
     _add_multi_instance_params(parser)
     # positional
     parser.add_argument("program", type=str,
-                        help="The full path to the proram/script to be launched. "
+                        help="The full path to the program/script to be launched. "
                         "followed by all the arguments for the script")

     # rest from the training program
@@ -61,7 +61,7 @@ def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None
         if not isinstance(stream, int):
             raise TypeError('Invalid type for stream argument, must be '
                             '`torch.cuda.Stream` or `int` representing a pointer '
-                            'to a exisiting stream')
+                            'to a existing stream')
     with torch.cuda.device(device):
         return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)

@@ -335,7 +335,7 @@ if __name__ == "__main__":
         "--embedding-dim",
         type=int,
         default=EMBEDDING_DIM,
-        help="Number of embedding dimentions.",
+        help="Number of embedding dimensions.",
     )
     parser.add_argument(
         "--warmup-cycles",
@@ -537,7 +537,7 @@ class MultiprocessContext(PContext):
         for proc in self._pc.processes:
             if proc.is_alive():
                 log.warning(
-                    f"Unable to shutdown process {proc.pid} via {death_sig}, forcefully exitting via {_get_kill_signal()}"
+                    f"Unable to shutdown process {proc.pid} via {death_sig}, forcefully exiting via {_get_kill_signal()}"
                 )
                 try:
                     os.kill(proc.pid, _get_kill_signal())
@@ -714,7 +714,7 @@ class SubprocessContext(PContext):
         for handler in self.subprocess_handlers.values():
             if handler.proc.poll() is None:
                 log.warning(
-                    f"Unable to shutdown process {handler.proc.pid} via {death_sig}, forcefully exitting via {_get_kill_signal()}"
+                    f"Unable to shutdown process {handler.proc.pid} via {death_sig}, forcefully exiting via {_get_kill_signal()}"
                 )
                 handler.close(death_sig=_get_kill_signal())
                 handler.proc.wait()
@@ -293,7 +293,7 @@ class EtcdRendezvous(object):
                 time.sleep(1)

             except RendezvousTimeoutError:
-                log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
+                log.info("Rendezvous timeout occurred in EtcdRendezvousHandler")
                 raise

             except RendezvousClosedError:
@@ -60,7 +60,7 @@ class MixtureSameFamily(Distribution):

         if not isinstance(self._mixture_distribution, Categorical):
             raise ValueError(" The Mixture distribution needs to be an "
-                             " instance of torch.distribtutions.Categorical")
+                             " instance of torch.distributions.Categorical")

         if not isinstance(self._component_distribution, Distribution):
             raise ValueError("The Component distribution need to be an "
@@ -696,7 +696,7 @@ class Partitioner:
             return find_combination, partitions

         def reset_partition_in_sparse_nn(partition, new_partition=True):
-            """If crossing the boudary between non-embedding nodes and
+            """If crossing the boundary between non-embedding nodes and
             embedding nodes, create a new partition
             """
             if in_embedding_region:
@@ -184,7 +184,7 @@ def get_attr_inference_rule(n: Node, traced):
     if attr_name == "shape":
         n.type = Dyn
     else:
-        raise TypeError("Not yet implelemted")
+        raise TypeError("Not yet implemented")

     # TODO. We leave it like this till we add a type to represent tensor sizes
     return n.type
@@ -507,7 +507,7 @@ def flatten_check(tensor_type, start_dim, end_dim):
         new_type_list = lhs + mid + rhs
         return TensorType(tuple(new_type_list))
     else:
-        raise TypeError(f'Incompatable dimentions {start_dim}, {end_dim - 1} in type {tensor_type}')
+        raise TypeError(f'Incompatable dimensions {start_dim}, {end_dim - 1} in type {tensor_type}')

 @register_inference_rule(torch.flatten)
 def flatten_inference_rule(n: Node):
@@ -28,8 +28,8 @@ class Partition:
             f" nodes: {self.node_names},\n"
             f" inputs: {self.inputs},\n"
             f" outputs: {self.outputs},\n"
-            f" partitions depenent on: {self.partitions_dependent_on},\n"
-            f" parition dependents: {self.partition_dependents}"
+            f" partitions dependent on: {self.partitions_dependent_on},\n"
+            f" partition dependents: {self.partition_dependents}"
         )


@@ -614,7 +614,7 @@ class StmtBuilder(Builder):
         else:
             raise NotSupportedError(
                 find_before(ctx, rhs.range().start, '=', offsets=(-1, 0)),
-                "unsupported kind of augumented assignment: " + op.__name__)
+                "unsupported kind of augmented assignment: " + op.__name__)
         return AugAssign(lhs, op_token, rhs)

     @staticmethod
@@ -242,7 +242,7 @@ class ParametrizationList(ModuleList):
         if len(value) != self.ntensors:
             raise ValueError(
                 "'right_inverse' must return a sequence of tensors of length "
-                f"{self.ntensors}. Got a sequence of lenght {len(value)}."
+                f"{self.ntensors}. Got a sequence of length {len(value)}."
             )
         for i, tensor in enumerate(value):
             original_i = getattr(self, f"original{i}")
@@ -1308,7 +1308,7 @@ def _index_fill_reshape_helper(g: jit_utils.GraphContext, self, dim, index):
         from torch.onnx.symbolic_opset11 import scatter  # type: ignore[no-redef]

     if self.type().dim() is None:
-        return _unimplemented("index_fill", "input rank not accesible")
+        return _unimplemented("index_fill", "input rank not accessible")
     self_dim = self.type().dim()
     dim_value = _parse_arg(dim, "i")
     unsqueezed_index = _unsqueeze_helper(
@@ -15,7 +15,7 @@ Updated operators:
     PRelu
     RoiAlign
     Scan
-    ScatterElemenets
+    ScatterElements
     ScatterND
     Where
     GreaterOrEqual
@@ -161,7 +161,7 @@ class ExtraCUDACopyPattern(Pattern):
     def __init__(self, prof: profile, should_benchmark: bool = False):
         super().__init__(prof, should_benchmark)
         self.name = "Extra CUDA Copy Pattern"
-        self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initalize it on GPU."
+        self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU."
         self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device"
         self.init_ops = {
             "aten::fill_", "aten::zero_", "aten::normal_", "aten::uniform_"
@@ -773,7 +773,7 @@ def load(

     if weights_only:
         if pickle_module is not None:
-            raise RuntimeError("Can not safely load weights when expiclit picke_module is specified")
+            raise RuntimeError("Can not safely load weights when explicit picke_module is specified")
     else:
         if pickle_module is None:
             pickle_module = pickle
@@ -333,7 +333,7 @@ def skip_if_rocm(func):
 def skip_if_win32():
     return sandcastle_skip_if(
         sys.platform == "win32",
-        "This unit test case is not supportted on Windows platform",
+        "This unit test case is not supported on Windows platform",
     )


@@ -311,7 +311,7 @@ def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):

 def raise_composite_compliance_error(err, additional_info=''):
     raise RuntimeError(
-        "Composite compilance check failed with "
+        "Composite compliance check failed with "
         "the above error.\n"
         f"{additional_info}"
         "If you are adding an OpInfo of an "
@@ -65,7 +65,7 @@ def main():
     print()

     # More string munging to make pretty output.
-    print(f"Average attemts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")
+    print(f"Average attempts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")

 def time_fn(m):
     return m.median / m.metadata["numel"]
@@ -80,7 +80,7 @@ def main():
     print()

     # More string munging to make pretty output.
-    print(f"Average attemts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")
+    print(f"Average attempts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")

 def time_fn(m):
     return m.mean / m.metadata["nnz"]
@@ -408,7 +408,7 @@ class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):

     def __getattr__(self, attrname):  # ?
         if attrname in UNIMPLEMENTED_ATTR:
-            raise AttributeError('Attemping to get ', attrname)
+            raise AttributeError('Attempting to get ', attrname)
         if attrname in DATAPIPES_OPS:
             return (self.as_datapipe()).__getattr__(attrname)
         return super().__getattr__(attrname)
@@ -155,7 +155,7 @@ def _collate_helper(conversion, item):
                 import torcharrow.pytorch as tap  # type: ignore[import]
                 collation_fn = tap.rec.Default()
             except Exception:
-                raise Exception("unable to import default collation function from the TorchArrrow")
+                raise Exception("unable to import default collation function from the TorchArrow")

             tuple_names.append(str(name))
             value = collation_fn(df[name])