Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
Actually make flake8 do something (#30892)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/30892
Fixes all outstanding lints and actually installs a properly configured flake8.
Test Plan: Imported from OSS
Differential Revision: D18862825
Pulled By: suo
fbshipit-source-id: 08e9083338a7309272e17bb803feaa42e348aa85
This commit is contained in:
parent: 8d35b6cec7
commit: 62b10721fb

.flake8 (2 changes)

@@ -8,6 +8,6 @@ ignore =
# these ignores are from flake8-bugbear; please fix!
B007,B008,
# these ignores are from flake8-comprehensions; please fix!
C400,C401,C402,C403,C404,C405,C407,C411,
C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415,C416
per-file-ignores = __init__.py: F401
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
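
The codes appended here (C413, C414, C415, C416) join the flake8-comprehensions block of the ignore list under the existing "please fix!" note, and the new per-file-ignores entry lets __init__.py files keep re-export imports that F401 would otherwise flag. As a rough, hypothetical illustration of the kind of pattern the comprehension codes report (not code from this diff):

items = [3, 1, 2]

ordered = list(sorted(items))   # C413: redundant list() around sorted(), which already returns a list
nested = sorted(list(items))    # C414: redundant list() call inside sorted()
copies = [x for x in items]     # C416: comprehension that only copies; list(items) is equivalent
print(ordered, nested, copies)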

.github/workflows/lint.yml (vendored, 3 changes)

@@ -65,7 +65,8 @@ jobs:
- name: Run flake8
run: |
set -eux
pip install flake8
pip install flake8 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi mccabe pycodestyle pyflakes
flake8 --version
flake8 --exit-zero > ${GITHUB_WORKSPACE}/flake8-output.txt
cat ${GITHUB_WORKSPACE}/flake8-output.txt
- name: Add annotations
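
The updated step installs flake8 together with the bugbear, comprehensions, executable, pyi and mypy plugins, runs it with --exit-zero so the step itself never fails the job, and writes the findings to flake8-output.txt for the "Add annotations" step that follows (not shown in this diff). Purely as a sketch, and not the annotation script PyTorch actually uses, flake8's default "path:line:col: CODE message" output could be parsed like this:

# Hypothetical parser for flake8's default output format; the real annotation
# step in the workflow is not part of this diff.
import re

LINE_RE = re.compile(r"^(?P<path>[^:]+):(?P<line>\d+):(?P<col>\d+): (?P<code>\w+) (?P<message>.*)$")

def parse_flake8_output(text):
    return [m.groupdict() for m in (LINE_RE.match(row) for row in text.splitlines()) if m]

sample = "some/module.py:10:80: E501 line too long (95 > 79 characters)"  # made-up example line
print(parse_flake8_output(sample))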

@@ -1,5 +1,3 @@
#!/usr/bin/env python

from __future__ import print_function
import os


@@ -629,24 +629,31 @@ def method_tests():
('index_fill', (S, S), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_index_dim', (), [0]),
('index_fill', (), (0, torch.tensor([0], dtype=torch.int64), 2), 'scalar_input_dim', (), [0]),
('index_fill', (), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_both_dim', (), [0]),
('inverse', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('inverse', lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('inverse', lambda: random_fullrank_matrix_distinct_singular_value(S, 2, 3),
NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (S, S), NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (1, 1), NO_ARGS, '1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_matrix(S), NO_ARGS, 'symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_psd_matrix(S), NO_ARGS, 'symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_pd_matrix(S), NO_ARGS, 'symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_square_matrix_of_rank(S, S - 2), NO_ARGS, 'dim2_null', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_psd_matrix(S),
NO_ARGS, 'symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_pd_matrix(S),
NO_ARGS, 'symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_square_matrix_of_rank(S, S - 2),
NO_ARGS, 'dim2_null', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_square_matrix_of_rank(S, 1), NO_ARGS, 'rank1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_square_matrix_of_rank(S, 2), NO_ARGS, 'rank2', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS,
'distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (3, 3, S, S), NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', (3, 3, 1, 1), NO_ARGS, 'batched_1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_matrix(S, 3), NO_ARGS, 'batched_symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_psd_matrix(S, 3), NO_ARGS, 'batched_symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_pd_matrix(S, 3), NO_ARGS, 'batched_symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_matrix(S, 3),
NO_ARGS, 'batched_symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_psd_matrix(S, 3),
NO_ARGS, 'batched_symmetric_psd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_symmetric_pd_matrix(S, 3),
NO_ARGS, 'batched_symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('det', lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3), NO_ARGS,
'batched_distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
# For `logdet` and `slogdet`, the function at det=0 is not smooth.

@@ -654,16 +661,20 @@ def method_tests():
# `make_nonzero_det` to make the random matrices have nonzero det. For
# `logdet`, we also set `make_nonzero_det(matrix, sign=1)` to make the
# matrix have positive det.
('logdet', lambda: make_nonzero_det(torch.randn(S, S), 1), NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(1, 1), 1), NO_ARGS, '1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(S, S), 1),
NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(1, 1), 1),
NO_ARGS, '1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(random_symmetric_matrix(S), 1), NO_ARGS,
'symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(random_symmetric_pd_matrix(S), 1), NO_ARGS,
'symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(random_fullrank_matrix_distinct_singular_value(S), 1, 0), NO_ARGS,
'distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(3, 3, S, S), 1), NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(3, 3, 1, 1), 1), NO_ARGS, 'batched_1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(3, 3, S, S), 1),
NO_ARGS, 'batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(torch.randn(3, 3, 1, 1), 1),
NO_ARGS, 'batched_1x1', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(random_symmetric_matrix(S, 3), 1), NO_ARGS,
'batched_symmetric', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('logdet', lambda: make_nonzero_det(random_symmetric_pd_matrix(S, 3), 1), NO_ARGS,

@@ -694,7 +705,8 @@ def method_tests():
'batched_symmetric_pd', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma], itemgetter(1)),
('slogdet', lambda: random_fullrank_matrix_distinct_singular_value(S, 3), NO_ARGS,
'batched_distinct_singular_values', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma], itemgetter(1)),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS, '', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:(S - 2)], NO_ARGS,
'wide', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:, :(S - 2)], NO_ARGS,

@@ -712,9 +724,11 @@ def method_tests():
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S, 3)[..., :, :(S - 2)], NO_ARGS,
'tall_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3)[..., :(S - 2), :], (False,),
'wide_all_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma], lambda usv: (usv[0], usv[1], usv[2][..., :, :(S - 2)])),
'wide_all_batched', (), NO_ARGS,
[skipCPUIfNoLapack, skipCUDAIfNoMagma], lambda usv: (usv[0], usv[1], usv[2][..., :, :(S - 2)])),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3)[..., :, :(S - 2)], (False,),
'tall_all_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma], lambda usv: (usv[0][..., :, :(S - 2)], usv[1], usv[2])),
'tall_all_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma],
lambda usv: (usv[0][..., :, :(S - 2)], usv[1], usv[2])),
('qr', (S, S), (False,), 'square_single', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (S, S - 2), (True,), 'tall_single' , (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
('qr', (3, S, S), (False,), 'square_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),
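
The method_tests hunks above, like most of the test-file hunks that follow, only re-wrap entries that had grown too long, apparently to satisfy the line-length limit, relying on Python's implicit line continuation inside parentheses so the tuples themselves are unchanged. A minimal, hypothetical sketch of the pattern (the names here are invented stand-ins, not the real helpers):

NO_ARGS = ()

def make_matrix(n):
    # stand-in for the random-matrix helpers used in the real test table
    return [[0.0] * n for _ in range(n)]

method_tests = [
    ('det', lambda: make_matrix(5),
     NO_ARGS, 'symmetric_pd', (), NO_ARGS,
     ['skipCPUIfNoLapack', 'skipCUDAIfNoMagma']),
]
print(method_tests[0][3])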

@@ -1168,6 +1168,7 @@ class DistAutogradTest(RpcAgentTestFixture):
loss = ret.sum()
local_grads = self._verify_backwards(exec_mode, [loss], context_id, local_grads, t1, t2)

@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)

@@ -1187,11 +1188,13 @@ class DistAutogradTest(RpcAgentTestFixture):
loss = ret.sum()
local_grads = self._verify_backwards(exec_mode, [loss], context_id, local_grads, t1, t2)

@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.chain_matmul(t1, t2, t3, t4)

@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2

@@ -1215,9 +1218,11 @@ class DistAutogradTest(RpcAgentTestFixture):

_backward_done = False

@staticmethod
def _set_backward_done():
DistAutogradTest._backward_done = True

@staticmethod
def _wait_backward_done():
while not DistAutogradTest._backward_done:
time.sleep(0.1)

@@ -1260,6 +1265,7 @@ class DistAutogradTest(RpcAgentTestFixture):
# Wait for backward to finish on rank 0.
DistAutogradTest._wait_backward_done()

@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2

@@ -4,6 +4,6 @@ import torch


@torch.jit.script # noqa: B903
class FooSameName(object):
class FooSameName(object): # noqa: B903
def __init__(self, y):
self.y = y

@@ -4,6 +4,6 @@ import torch


@torch.jit.script # noqa: B903
class FooUniqueName(object):
class FooUniqueName(object): # noqa: B903
def __init__(self, y):
self.y = y
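
In the two test modules above, and in the TestClassType hunks below, the # noqa: B903 comment ends up on the class definition line. flake8-bugbear's B903 flags classes whose __init__ does nothing but assign its arguments to attributes, suggesting an immutable namedtuple or __slots__ instead; these TorchScript test classes need to stay plain classes, so the check is silenced in place. A small, hypothetical reproduction of the pattern:

# Hypothetical example of a class B903 would flag: __init__ only stores its
# arguments. The trailing comment suppresses the warning on the reported line.
class Point(object):  # noqa: B903
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(Point(1, 2).x)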

@@ -42,7 +42,7 @@ class TestClassType(JitTestCase):

def test_get_attr(self):
@torch.jit.script # noqa: B903
class FooTest(object):
class FooTest(object): # noqa: B903
def __init__(self, x):
self.foo = x


@@ -56,7 +56,7 @@ class TestClassType(JitTestCase):

def test_in(self):
@torch.jit.script # noqa: B903
class FooTest(object):
class FooTest(object): # noqa: B903
def __init__(self):
pass


@@ -149,7 +149,7 @@ class TestClassType(JitTestCase):
def test_type_annotations(self):
with self.assertRaisesRegex(RuntimeError, "Expected a value of type \'bool"):
@torch.jit.script # noqa: B903
class FooTest(object):
class FooTest(object): # noqa: B903
def __init__(self, x):
# type: (bool) -> None
self.foo = x

@@ -171,7 +171,7 @@ class TestClassType(JitTestCase):
def test_class_type_as_param(self):
global FooTest # see [local resolution in python]
@torch.jit.script # noqa: B903
class FooTest(object):
class FooTest(object): # noqa: B903
def __init__(self, x):
self.attr = x


@@ -272,7 +272,7 @@ class TestClassType(JitTestCase):

def test_save_load_with_classes_nested(self):
@torch.jit.script # noqa: B903
class FooNestedTest(object):
class FooNestedTest(object): # noqa: B903
def __init__(self, y):
self.y = y


@@ -314,7 +314,7 @@ class TestClassType(JitTestCase):
def test_python_interop(self):
global Foo # see [local resolution in python]
@torch.jit.script # noqa: B903
class Foo(object):
class Foo(object): # noqa: B903
def __init__(self, x, y):
self.x = x
self.y = y

@@ -341,7 +341,7 @@ class TestClassType(JitTestCase):
def test_class_specialization(self):
global Foo # see [local resolution in python]
@torch.jit.script # noqa: B903
class Foo(object):
class Foo(object): # noqa: B903
def __init__(self, x, y):
self.x = x
self.y = y

@@ -366,7 +366,7 @@ class TestClassType(JitTestCase):
def test_class_sorting(self):
global Foo # see [local resolution in python]
@torch.jit.script # noqa: B903
class Foo(object):
class Foo(object): # noqa: B903
def __init__(self, x):
# type: (int) -> None
self.x = x

@@ -860,7 +860,7 @@ class TestClassType(JitTestCase):

def test_init_compiled_first(self):
@torch.jit.script # noqa: B903
class Foo(object):
class Foo(object): # noqa: B903
def __before_init__(self):
# accessing this field should not throw, since __init__ should be compiled
return self.x

@@ -871,7 +871,7 @@ class TestClassType(JitTestCase):

def test_class_constructs_itself(self):
@torch.jit.script # noqa: B903
class LSTMStateStack(object):
class LSTMStateStack(object): # noqa: B903
def __init__(self, num_layers, hidden_size):
# type: (int, int) -> None
self.num_layers = num_layers

@@ -896,7 +896,7 @@ class TestClassType(JitTestCase):

# should not throw
@torch.jit.script # noqa: B903
class Tree(object):
class Tree(object): # noqa: B903
def __init__(self):
self.child = torch.jit.annotate(Optional[Leaf], None)


@@ -910,6 +910,6 @@ class TestClassType(JitTestCase):
"""
with self.assertRaises(RuntimeError):
@torch.jit.script # noqa: B903
class Tree(object):
class Tree(object): # noqa: B903
def __init__(self):
self.parent = torch.jit.annotate(Optional[Tree], None)

@@ -339,7 +339,7 @@ class TestCaffe2Backend_opset9(unittest.TestCase):
# (save the model with a batch_size of 1 with rnn with a variable batch size,
# othewise expand will fail)
variable_batch_size_init_input = make_input(1)
# Constant folding works when model has parameters embedded. For this case, we need to disable it
# Constant folding works when model has parameters embedded. For this case, we need to disable it
onnxir, _ = do_export(model, variable_batch_size_init_input, keep_initializers_as_inputs=True,
do_constant_folding=False)
other_input = make_input(RNN_BATCH_SIZE + 1)

@@ -1427,7 +1427,9 @@ class TestCaffe2Backend_opset9(unittest.TestCase):

x = torch.ones(2, 3, 4)
y = torch.ones(2, 3, 4) * 2
self.run_model_test(Arithmetic(), train=False, input=(), batch_size=BATCH_SIZE, use_gpu=False, example_outputs=(x + 3, y * (x + 3)))
self.run_model_test(Arithmetic(),
train=False, input=(), batch_size=BATCH_SIZE,
use_gpu=False, example_outputs=(x + 3, y * (x + 3)))

def test_tensor_factories(self):
class TensorFactory(torch.nn.Module):

@@ -18,7 +18,8 @@ class TestQuantizedOps(unittest.TestCase):

pytorch_res = q_model(*pt_inputs)
f = io.BytesIO()
torch.onnx.export(q_model, pt_inputs, f, input_names=input_names, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
torch.onnx.export(q_model, pt_inputs, f, input_names=input_names,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
f.seek(0)
onnx_model = onnx.load(f)
caffe_res = c2.run_model(onnx_model, dict(zip(input_names, sample_inputs)))[0]

@@ -68,7 +69,8 @@ class TestQuantizedOps(unittest.TestCase):

model = torch.jit.load(buf)
f = io.BytesIO()
torch.onnx.export(model, input, f, input_names=input_names, example_outputs=outputs, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
torch.onnx.export(model, input, f, input_names=input_names, example_outputs=outputs,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
f.seek(0)

onnx_model = onnx.load(f)

@@ -1,5 +1,3 @@
#!/usr/bin/env python3

import dist_utils
import torch.distributed.rpc as rpc


@@ -47,7 +47,8 @@ TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
if TEST_CUDA:
torch.ones(1).cuda() # has_magma shows up after cuda is initialized
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_MAGMA = torch.cuda.has_magma
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9

@@ -3542,7 +3542,7 @@ graph(%Ra, %Rb):
return x

class Test(torch.nn.Module):
def forward(self, input=[]):
def forward(self, input=[]): # noqa: B006
return input

with self.assertRaisesRegex(Exception, "Mutable default parameters"):
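
flake8-bugbear's B006 warns about mutable default arguments such as input=[]. The test above keeps one deliberately, since it checks that TorchScript rejects mutable default parameters, so the diff adds # noqa: B006 on the def line instead of changing the code. A hypothetical illustration of the hazard B006 exists to catch:

# The default list is created once, when the function is defined, and then
# shared by every call that does not pass its own list.
def append_once(value, acc=[]):  # noqa: B006  (deliberate, to show the shared default)
    acc.append(value)
    return acc

print(append_once(1))  # [1]
print(append_once(2))  # [1, 2]: the same list object as the first call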

@@ -5648,7 +5648,8 @@ a")
self.assertEqual(grad, grad_ref)


@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Profiling executor fails to recognize that tensors in a list require gradients")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING,
"Profiling executor fails to recognize that tensors in a list require gradients")
def test_meshgrid(self):
with enable_profiling_mode():
@torch.jit.script

@@ -6977,7 +6978,9 @@ a")

with self.assertRaises(RuntimeError) as cm:
bar(torch.rand(10), torch.rand(9))
FileCheck().check("The above operation failed in interpreter").check("Traceback (most recent call last)").check("in foo").check("in baz").run(str(cm.exception))
FileCheck().check("The above operation failed in interpreter") \
.check("Traceback (most recent call last)") \
.check("in foo").check("in baz").run(str(cm.exception))

def test_error_stacktrace_interface(self):
global IFace

@@ -7014,7 +7017,9 @@ a")
with self.assertRaises(RuntimeError) as cm:
x = f.one(torch.rand(10), torch.rand(9))
bar(torch.rand(10), torch.rand(9))
FileCheck().check("The above operation failed in interpreter").check("Traceback (most recent call last)").check("in foo").check("in baz").run(str(cm.exception))
FileCheck().check("The above operation failed in interpreter") \
.check("Traceback (most recent call last)") \
.check("in foo").check("in baz").run(str(cm.exception))

def test_binop_unsupported_error(self):
with self.assertRaisesRegex(NotSupportedError, "unsupported binary operator:"):

@@ -7305,7 +7310,8 @@ a")
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_dtype.graph_for(5, profile_and_replay=True)
# both should have completed shapes
FileCheck().check("Tensor = aten::tensor").check("Float() = prim::BailOut").check("Tensor = aten::tensor").check("Half() = prim::BailOut").run(g)
FileCheck().check("Tensor = aten::tensor").check("Float() = prim::BailOut") \
.check("Tensor = aten::tensor").check("Half() = prim::BailOut").run(g)
else:
g = test_dtype.graph_for(5)
# first should have type set second should not

@@ -7318,7 +7324,8 @@ a")

if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4), profile_and_replay=True)
FileCheck().check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").run(g)
FileCheck().check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut") \
.check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").run(g)
else:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4))
FileCheck().check("Tensor = aten::as_tensor").check("Float(*, *) = aten::as_tensor").run(g)

@@ -11588,7 +11595,7 @@ a")
with self.assertRaisesRegex(RuntimeError, 'methods must have a self argument'):
class MethodNoSelf(torch.jit.ScriptModule):
@torch.jit.script_method # noqa: B902
def forward():
def forward(): # noqa: B902
return torch.zeros(3, 4)

MethodNoSelf()

@@ -665,8 +665,11 @@ class PostTrainingDynamicQuantTest(QuantizationTestCase):
super(ScriptWrapperPacked, self).__init__()
self.cell = cell

def forward(self, x, hiddens):
# type: (PackedSequence, Tuple[torch.Tensor, torch.Tensor]) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]
def forward(self,
x, # type: PackedSequence
hiddens # type: Tuple[torch.Tensor, torch.Tensor]
):
# type: (...) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]
return self.cell(x, hiddens)

cell_packed = torch.jit.script(ScriptWrapperPacked(cell_int8))

@@ -2111,7 +2111,8 @@ class TestSparse(TestCase):

def test_div_by_sparse_error(self):
self.assertRaisesRegex(RuntimeError, 'A Sparse Tensor can only be divided',
lambda: torch.tensor(1., device=self.device).to_sparse() / torch.tensor(1., device=self.device).to_sparse())
lambda: torch.tensor(1., device=self.device).to_sparse()
/ torch.tensor(1., device=self.device).to_sparse())


class TestUncoalescedSparse(TestSparse):

@@ -6318,13 +6318,17 @@ class TestTorchDeviceType(TestCase):
for target in (torch.tensor(0, device=device), torch.tensor([0], device=device), torch.tensor([[0]], device=device)):
if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2):
output_shape = (target.shape[0],) if target.dim() == 2 else ()
self.assertEqual(output_shape, torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
self.assertEqual(output_shape,
torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape)
self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape)
else:
self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))

# multi_margin_loss
for input in (zero_d, one_d, torch.randn(1, 1, device=device)):

@@ -13071,7 +13075,8 @@ class TestTorchDeviceType(TestCase):
else:
check_sum_all(torch.tensor([True, False, True], dtype=torch.bool, device=device))

def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn, compare_data=True, default_is_preserve=False):
def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,
compare_data=True, default_is_preserve=False):
nhwc = input_generator_fn(device)
# nhwc is not memory dense, but looks like channels last
nhwc = nhwc[:, :, ::2, ::2]

@@ -13111,7 +13116,8 @@ class TestTorchDeviceType(TestCase):

def test_memory_format_to(self, device):
def input_generator_fn(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32) \
.contiguous(memory_format=torch.channels_last)

def transformation_fn(tensor, **kwargs):
return tensor.to(dtype=torch.float64, **kwargs)

@@ -13120,7 +13126,8 @@ class TestTorchDeviceType(TestCase):

def test_memory_format_type(self, device):
def input_generator_fn(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32) \
.contiguous(memory_format=torch.channels_last)

def transformation_fn(tensor, **kwargs):
return tensor.type(torch.float64, **kwargs)

@@ -13129,7 +13136,8 @@ class TestTorchDeviceType(TestCase):

def test_memory_format_clone(self, device):
def input_generator_fn(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32) \
.contiguous(memory_format=torch.channels_last)

def transformation_fn(tensor, **kwargs):
return tensor.clone(**kwargs)

@@ -13152,7 +13160,8 @@ class TestTorchDeviceType(TestCase):

def test_memory_format_factory_like_functions_preserve_strides(self, device):
def input_generator_fn(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32) \
.contiguous(memory_format=torch.channels_last)

transformation_fns = [
lambda t, **kwargs: torch.zeros_like(t, **kwargs),

@@ -13169,7 +13178,8 @@ class TestTorchDeviceType(TestCase):

def test_memory_format_type_shortcuts(self, device):
def input_generator_fn(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).clamp(0, 1).round().contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float32).clamp(0, 1) \
.round().contiguous(memory_format=torch.channels_last)

def get_fn(fn_name):
def transformation_fn(tensor, **kwargs):

@@ -13186,7 +13196,8 @@ class TestTorchDeviceType(TestCase):

# Test 'float' separately to avoid float->float no-op.
def input_generator_fn_double(device):
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float64).clamp(0, 1).round().contiguous(memory_format=torch.channels_last)
return torch.randn((4, 3, 8, 8), device=device, dtype=torch.float64).clamp(0, 1) \
.round().contiguous(memory_format=torch.channels_last)

self._test_memory_format_transformations(device, input_generator_fn_double, get_fn('float'))

@@ -186,7 +186,7 @@ class TestTypePromotion(TestCase):
tensor[torch.abs(tensor) < 0.05] = 5
return tensor

# verifies that torch.<op>(first, second) is the same as
# verifies that torch.<op>(first, second) is the same as
# torch.<op>(first.to(common_dtype), second.to(common_dtype)) in cases where that should hold.
@float_double_default_dtype
def test_many_promotions(self, device):

@@ -209,7 +209,8 @@ class TestTypePromotion(TestCase):
if non_contiguous:
first = first.transpose(0, 2)
second = second.transpose(2, 1)
self.assertNotEqual(first.stride(), second.stride(), "some non-contiguous issues could be missed if tensors have same strides")
self.assertNotEqual(first.stride(), second.stride(),
"some non-contiguous issues could be missed if tensors have same strides")

self.assertEqual(not first.is_contiguous(), non_contiguous)
self.assertEqual(not second.is_contiguous(), non_contiguous)

@@ -249,7 +250,9 @@ class TestTypePromotion(TestCase):

self.assertRaisesRegex(RuntimeError, "Boolean alpha only supported",
lambda: torch.add(1, 1, alpha=True))
self.assertEqual(torch.add(torch.tensor(True, device=device), torch.tensor(True, device=device), True), torch.tensor(True, device=device))
self.assertEqual(torch.add(torch.tensor(True, device=device),
torch.tensor(True, device=device), True),
torch.tensor(True, device=device))

@float_double_default_dtype
def test_create_bool_tensors(self, device):

@@ -273,9 +276,13 @@ class TestTypePromotion(TestCase):
self.assertEqual(torch.result_type(1, torch.tensor(1, dtype=torch.int, device=device)), torch.int)
self.assertEqual(torch.result_type(1, 1.), torch.get_default_dtype())
self.assertEqual(torch.result_type(torch.tensor(1, device=device), 1.), torch.get_default_dtype())
self.assertEqual(torch.result_type(torch.tensor(1, dtype=torch.long, device=device), torch.tensor([1, 1], dtype=torch.int, device=device)), torch.int)
self.assertEqual(torch.result_type(torch.tensor(1, dtype=torch.long, device=device),
torch.tensor([1, 1], dtype=torch.int, device=device)),
torch.int)
self.assertEqual(torch.result_type(torch.tensor([1., 1.], dtype=torch.float, device=device), 1.), torch.float)
self.assertEqual(torch.result_type(torch.tensor(1., dtype=torch.float, device=device), torch.tensor(1, dtype=torch.double, device=device)), torch.double)
self.assertEqual(torch.result_type(torch.tensor(1., dtype=torch.float, device=device),
torch.tensor(1, dtype=torch.double, device=device)),
torch.double)

@float_double_default_dtype
def test_can_cast(self, device):

@@ -4320,7 +4320,8 @@ Args:

add_docstr(torch.randint,
r"""
randint(low=0, high, size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
randint(low=0, high, size, *, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).

@@ -300,7 +300,8 @@ def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True
diff_input_list = list(iter_tensors(tupled_inputs, True))
if not diff_input_list:
raise RuntimeError("no Tensors requiring grad found in input")
grads_input = torch.autograd.grad(output, diff_input_list, [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output],
grads_input = torch.autograd.grad(output, diff_input_list,
[torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output],
allow_unused=True)
for gi, i in zip(grads_input, diff_input_list):
if gi is None:

@@ -160,7 +160,9 @@ def lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
if unpack_pivots:
LU_pivots_zero_idx = LU_pivots - 1
if LU_data.dim() > 2:
P = torch.eye(m, device=LU_data.device, dtype=LU_data.dtype).expand(shape[:-1] + (m,)).clone(memory_format=torch.contiguous_format)
P = torch.eye(m, device=LU_data.device, dtype=LU_data.dtype) \
.expand(shape[:-1] + (m,)) \
.clone(memory_format=torch.contiguous_format)
for idx in product(*map(lambda x: list(range(x)), shape[:-2])):
final_order = list(range(m))
for k, j in enumerate(LU_pivots_zero_idx[idx]):

@@ -1741,7 +1741,7 @@ if _enabled:
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overriden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = getattr(self, "__dir__")
self_method = self.__dir__
if self_method.__func__ == get_function_from_type(RecursiveScriptModule, "__dir__"):
return super(RecursiveScriptModule, self).__dir__()
return self_method()

@@ -1750,7 +1750,7 @@ if _enabled:
# is defined then returns true for classes. because __iter__() on this
# class throws if it isn't overriden, we define __bool__ to preserve default behavior
def __bool__(self):
self_method = getattr(self, "__bool__")
self_method = self.__bool__
if self_method.__func__ == get_function_from_type(RecursiveScriptModule, "__bool__"):
return True
return self_method()
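
The two hunks above replace getattr(self, "__dir__") and getattr(self, "__bool__") with plain attribute access, which is what flake8-bugbear's B009 asks for whenever the attribute name is a constant string. A hypothetical illustration:

# Hypothetical example of B009: getattr with a constant attribute name is just
# a roundabout spelling of normal attribute access.
class Widget:
    def describe(self):
        return "widget"

w = Widget()
method = getattr(w, "describe")   # what B009 flags
method = w.describe               # the equivalent form the diff switches to
print(method())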

@@ -13,7 +13,8 @@ def register_quantized_ops(domain, version):
quant_version_ops = getmembers(sym_registry._symbolic_versions['caffe2'])
for op in quant_version_ops:
if isfunction(op[1]) and not sym_registry.is_registered_op(op[0], domain, version):
aten_q_ops = ['relu', '_empty_affine_quantized', 'dequantize', 'quantize_per_tensor', 'upsample_nearest2d', 'avg_pool2d', 'reshape', 'slice']
aten_q_ops = ['relu', '_empty_affine_quantized', 'dequantize',
'quantize_per_tensor', 'upsample_nearest2d', 'avg_pool2d', 'reshape', 'slice']
if op[0] in aten_q_ops:
sym_registry.register_op(op[0], op[1], '', version)
sym_registry.register_op(op[0], op[1], domain, version)

@@ -107,11 +107,13 @@ def __interpolate(g, input, size, scale_factor, mode, align_corners):
except AttributeError:
is_scalar = not sym_help._is_packed_list(size)
if not is_scalar:
warnings.warn("Cannot verify if the output_size is a scalar while exporting interpolate. Assuming that it is not a scalar.")
warnings.warn("Cannot verify if the output_size is a scalar "
"while exporting interpolate. Assuming that it is not a scalar.")

if is_scalar:
if not input.type().dim():
return sym_help._unimplemented("interpolate (with a scalar output_size)", "missing input shape (try giving an array of output_size values)")
return sym_help._unimplemented("interpolate (with a scalar output_size)",
"missing input shape (try giving an array of output_size values)")
size = unsqueeze(g, size, 0)
size = [size for i in range(input.type().dim() - 2)]
size = g.op("Concat", *size, axis_i=0)
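
This hunk, like the _unimplemented, log_softmax, and assertNotEqual hunks elsewhere in the diff, wraps a long message by splitting it into adjacent string literals; Python concatenates them at compile time, so the emitted text is unchanged. A small sketch:

# Adjacent string literals are concatenated at compile time, so the wrapped
# form is identical to writing the message on one long line.
message = ("Cannot verify if the output_size is a scalar "
           "while exporting interpolate. Assuming that it is not a scalar.")
assert "scalar while exporting" in message
print(message)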

@@ -878,7 +878,9 @@ def log_softmax(g, input, dim, dtype=None):
# TODO: remove this as onnx opset 11 spec allows negative axes
input_dim = input.type().dim()
if input_dim is None:
return _unimplemented("dim", "ONNX and PyTorch use different strategies to split the input. Input rank must be known at export time.")
return _unimplemented("dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.")
if dim < 0:
dim = input_dim + dim
is_transpose_required = (input_dim != dim + 1)

@@ -247,7 +247,8 @@ def _trace(func, args, operator_export_type, return_outs=False):
if isinstance(args, torch.Tensor):
args = (args, )

trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(func, args, _force_outplace=False, _return_inputs_states=True)
trace_graph, torch_out, inputs_states = \
torch.jit._get_trace_graph(func, args, _force_outplace=False, _return_inputs_states=True)
warn_on_static_input_change(inputs_states)

trace_graph = _optimize_graph(trace_graph, operator_export_type)

@@ -268,7 +269,8 @@ def _trace_and_get_graph_from_model(model, args, training):
# can turn training=True (or None, to preserve whatever the original
# training mode was.)
with set_training(model, training):
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(model, args, _force_outplace=False, _return_inputs_states=True)
trace_graph, torch_out, inputs_states = \
torch.jit._get_trace_graph(model, args, _force_outplace=False, _return_inputs_states=True)
warn_on_static_input_change(inputs_states)

if orig_state_dict_keys != _unique_state_dict(model).keys():

@@ -24,7 +24,8 @@ class ConvPackedParams(torch.nn.Module):
@torch.jit.export
def set_weight_bias(self, weight, bias):
# type: (torch.Tensor, Optional[torch.Tensor]) -> None
self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride, self.padding, self.dilation, self.groups)
self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride,
self.padding, self.dilation, self.groups)

@torch.jit.export
def _weight_bias(self):

@@ -54,7 +54,10 @@ class Tensor(torch._C._TensorBase):
if self.qscheme() == torch.per_tensor_affine:
quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
elif self.qscheme() == torch.per_channel_affine:
quantizer_params = self.qscheme(), self.q_per_channel_scales(), self.q_per_channel_zero_points(), self.q_per_channel_axis()
quantizer_params = self.qscheme(), \
self.q_per_channel_scales(), \
self.q_per_channel_zero_points(), \
self.q_per_channel_axis()
else:
raise RuntimeError("Unsupported qscheme {} in deepcopy".format(self.qscheme()))
new_tensor = torch._utils._rebuild_qtensor(