Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
fix lint after new flake8 release added new style constraints (#13047)

Summary: fix lint after new flake8 release added new style constraints

Pull Request resolved: https://github.com/pytorch/pytorch/pull/13047
Differential Revision: D10527804
Pulled By: soumith
fbshipit-source-id: 6f4d02662570b6339f69117b61037c8394b0bbd8

This commit is contained in:
parent d72de9fb1e
commit cf235e0894
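Every hunk below is the same mechanical fix: the flake8 release in question started enforcing W605 ("invalid escape sequence"), which flags backslash escapes like \( or \| inside ordinary string literals, and the remedy is to prefix the literal with r. A minimal sketch of the before/after (illustrative, not taken from the diff):

    import re

    # Before: '\(' is not a recognized Python escape. The interpreter
    # keeps the backslash, so the regex worked, but flake8's new W605
    # check now flags the spelling as an invalid escape sequence.
    #     uuid_regex = re.compile(' \(UUID: .+?\)')

    # After: the raw string passes every backslash through explicitly.
    uuid_regex = re.compile(r' \(UUID: .+?\)')

    # The compiled pattern is identical either way; behavior is unchanged.
    assert uuid_regex.search('GPU 0: Tesla V100 (UUID: GPU-abc123)')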
@@ -3403,7 +3403,7 @@ a")
             if True:
                 x = [1, 2, 3]
             return
-        with self.assertRaisesRegex(RuntimeError, "previously has type Tensor\[\]"):
+        with self.assertRaisesRegex(RuntimeError, r"previously has type Tensor\[\]"):
             self.checkScript(reassign_from_empty_literal, (), optimize=False)

         def reassign_from_empty_builtin():
@@ -5967,7 +5967,7 @@ a")
             def f4(a):
                 torch.cat(a)

-        with self.assertRaisesRegex(RuntimeError, 'argument \'tensors\' but found int\[\]'):
+        with self.assertRaisesRegex(RuntimeError, r'argument \'tensors\' but found int\[\]'):
             @torch.jit.script
             def f5(a):
                 torch.cat([3])
@@ -6295,7 +6295,7 @@ a")
             return x

     def test_for_range_no_arg(self):
-        with self.assertRaisesRegex(RuntimeError, 'range\(\) expects 1 argument but got 0'):
+        with self.assertRaisesRegex(RuntimeError, r'range\(\) expects 1 argument but got 0'):
             @torch.jit.script
             def range_no_arg(x):
                 for i in range():
@@ -8836,21 +8836,21 @@ class TestCustomOperators(JitTestCase):
     def test_passing_too_many_args(self):
         with self.assertRaisesRegex(
             RuntimeError,
-            "aten::relu\(\) expected at most 1 argument\(s\) but received 2 argument\(s\)"
+            r"aten::relu\(\) expected at most 1 argument\(s\) but received 2 argument\(s\)"
         ):
             torch.ops.aten.relu(1, 2)

     def test_passing_too_few_args(self):
         with self.assertRaisesRegex(
             RuntimeError,
-            "aten::relu\(\) is missing value for argument 'self'."
+            r"aten::relu\(\) is missing value for argument 'self'."
         ):
             torch.ops.aten.relu()

     def test_passing_one_positional_but_not_the_second(self):
         with self.assertRaisesRegex(
             RuntimeError,
-            "aten::transpose\(\) is missing value for argument 'dim0'."
+            r"aten::transpose\(\) is missing value for argument 'dim0'."
         ):
             torch.ops.aten.transpose(torch.ones(5, 5))
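A note on the test hunks above: assertRaisesRegex treats its second argument as a regular expression, which is why brackets and parentheses are escaped in the first place; the r-prefix merely keeps those backslashes out of W605's way. A self-contained sketch (made-up test, not from the diff):

    import unittest

    class EscapeDemo(unittest.TestCase):
        def test_regex_arg(self):
            # The message pattern is a regex, so literal '[' and ']'
            # must be escaped -- hence the backslashes, and hence the
            # raw string to satisfy W605.
            with self.assertRaisesRegex(RuntimeError, r"type Tensor\[\]"):
                raise RuntimeError("previously has type Tensor[]")

    if __name__ == '__main__':
        unittest.main()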
@@ -774,8 +774,8 @@ class TestNN(NNTestCase):
         module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
         input = torch.randn(1, 3, 1, 1)
         with self.assertRaisesRegex(RuntimeError,
-                                    'Calculated padded input size per channel: \(1 x 1\). ' +
-                                    'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
+                                    r'Calculated padded input size per channel: \(1 x 1\). ' +
+                                    r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
             module(input)

     def test_invalid_conv3d(self):
@@ -32,7 +32,7 @@ IDENT_REGEX = r'(^|\W){}($|\W)'
 # TODO: Use a real parser here; this will get bamboozled
 # by signatures that contain things like std::array<bool, 2> (note the space)
 def split_name_params(prototype):
-    name, params = re.match('(\w+)\((.*)\)', prototype).groups()
+    name, params = re.match(r'(\w+)\((.*)\)', prototype).groups()
     return name, params.split(', ')
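For reference, what split_name_params does with the raw-string pattern (hypothetical prototype string, not part of the diff):

    import re

    def split_name_params(prototype):
        name, params = re.match(r'(\w+)\((.*)\)', prototype).groups()
        return name, params.split(', ')

    print(split_name_params('clamp(Tensor self, Scalar min, Scalar max)'))
    # -> ('clamp', ['Tensor self', 'Scalar min', 'Scalar max'])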
@@ -47,13 +47,13 @@ def find_cuda_version(cuda_home):
     candidate_names = [os.path.basename(c) for c in candidate_names]

     # suppose version is MAJOR.MINOR.PATCH, all numbers
-    version_regex = re.compile('[0-9]+\.[0-9]+\.[0-9]+')
+    version_regex = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
     candidates = [c.group() for c in map(version_regex.search, candidate_names) if c]
     if len(candidates) > 0:
         # normally only one will be retrieved, take the first result
         return candidates[0]
     # if no candidates were found, try MAJOR.MINOR
-    version_regex = re.compile('[0-9]+\.[0-9]+')
+    version_regex = re.compile(r'[0-9]+\.[0-9]+')
     candidates = [c.group() for c in map(version_regex.search, candidate_names) if c]
     if len(candidates) > 0:
         return candidates[0]
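A quick sanity check of the two version patterns (made-up directory names; the r-prefix does not change what either pattern matches, it only silences W605):

    import re

    names = ['cuda-9.2.148', 'cuda-10.0']
    full = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
    short = re.compile(r'[0-9]+\.[0-9]+')

    print([m.group() for m in map(full.search, names) if m])
    # -> ['9.2.148']
    print([m.group() for m in map(short.search, names) if m])
    # -> ['9.2', '10.0']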
@@ -17,7 +17,7 @@ def parse_kwargs(desc):
     }
     """
     # Split on exactly 4 spaces after a newline
-    regx = re.compile("\n\s{4}(?!\s)")
+    regx = re.compile(r"\n\s{4}(?!\s)")
     kwargs = [section.strip() for section in regx.split(desc)]
     kwargs = [section for section in kwargs if len(section) > 0]
     return {desc.split(' ')[0]: desc for desc in kwargs}
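How parse_kwargs behaves with the raw-string pattern, on a small made-up docstring fragment (not from the diff):

    import re

    def parse_kwargs(desc):
        # Split on exactly 4 spaces after a newline
        regx = re.compile(r"\n\s{4}(?!\s)")
        kwargs = [section.strip() for section in regx.split(desc)]
        kwargs = [section for section in kwargs if len(section) > 0]
        return {desc.split(' ')[0]: desc for desc in kwargs}

    doc = ("\n    alpha (float): scaling factor"
           "\n    beta (float): offset added to the result\n")
    print(parse_kwargs(doc))
    # -> {'alpha': 'alpha (float): scaling factor',
    #     'beta': 'beta (float): offset added to the result'}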
@@ -220,7 +220,7 @@ class AdaptiveLogSoftmaxWithLoss(Module):
         return out

     def log_prob(self, input):
-        """ Computes log probabilities for all :math:`n\_classes`
+        r""" Computes log probabilities for all :math:`n\_classes`

         Args:
             input (Tensor): a minibatch of examples
@@ -240,7 +240,7 @@ class AdaptiveLogSoftmaxWithLoss(Module):
         return self._get_full_log_prob(input, head_output)

     def predict(self, input):
-        """ This is equivalent to `self.log_pob(input).argmax(dim=1)`,
+        r""" This is equivalent to `self.log_pob(input).argmax(dim=1)`,
         but is more efficient in some cases.

         Args:
@@ -4,7 +4,7 @@ from .optimizer import Optimizer


 class Adam(Optimizer):
-    """Implements Adam algorithm.
+    r"""Implements Adam algorithm.

     It has been proposed in `Adam: A Method for Stochastic Optimization`_.
@@ -4,7 +4,7 @@ from .optimizer import Optimizer


 class SparseAdam(Optimizer):
-    """Implements lazy version of Adam algorithm suitable for sparse tensors.
+    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

     In this variant, only moments that show up in the gradient get updated, and
     only those portions of the gradient get applied to the parameters.
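The four docstring hunks above share a second motive besides lint cleanliness: sequences like \_ inside a :math: role are LaTeX markup for Sphinx, so the backslash has to survive into the rendered docstring, which a raw docstring guarantees. A tiny sketch (hypothetical class, not from the diff):

    class Demo(object):
        r"""Computes scores for all :math:`n\_classes`.

        Without the r-prefix, '\_' would be an invalid escape (W605);
        with it, Sphinx receives the backslash it needs for LaTeX.
        """

    print(Demo.__doc__.splitlines()[0])
    # -> Computes scores for all :math:`n\_classes`.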
@@ -65,9 +65,9 @@ def run_and_parse_first_match(run_lambda, command, regex):

 def get_conda_packages(run_lambda):
     if get_platform() == 'win32':
-        grep_cmd = 'findstr /R "torch soumith"'
+        grep_cmd = r'findstr /R "torch soumith"'
     else:
-        grep_cmd = 'grep "torch\|soumith"'
+        grep_cmd = r'grep "torch\|soumith"'
     out = run_and_read_all(run_lambda, 'conda list | ' + grep_cmd)
     if out is None:
         return out
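Why the shell strings here were flagged at all (illustrative): grep's basic-regex dialect uses \| for alternation, so the backslash must reach the shell intact. '\|' is not a valid Python escape -- the interpreter kept the backslash anyway, which is why the old spelling worked -- but flake8's W605 now rejects it, and the raw string states the intent explicitly.

    grep_cmd = r'grep "torch\|soumith"'
    assert grep_cmd == 'grep "torch\\|soumith"'  # same value, spelled explicitly
    print(grep_cmd)
    # -> grep "torch\|soumith"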
@@ -91,7 +91,7 @@ def get_nvidia_driver_version(run_lambda):

 def get_gpu_info(run_lambda):
     smi = get_nvidia_smi()
-    uuid_regex = re.compile(' \(UUID: .+?\)')
+    uuid_regex = re.compile(r' \(UUID: .+?\)')
     rc, out, _ = run_lambda(smi + ' -L')
     if rc is not 0:
         return None
@@ -190,9 +190,9 @@ def get_pip_packages(run_lambda):
     # People generally have `pip` as `pip` or `pip3`
     def run_with_pip(pip):
         if get_platform() == 'win32':
-            grep_cmd = 'findstr /R "numpy torch"'
+            grep_cmd = r'findstr /R "numpy torch"'
         else:
-            grep_cmd = 'grep "torch\|numpy"'
+            grep_cmd = r'grep "torch\|numpy"'
         return run_and_read_all(run_lambda, pip + ' list --format=legacy | ' + grep_cmd)

     if not PY3: