Super resolution export to Caffe2 is broken, skip it. (#21479)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/21479

ghimport-source-id: 60fa97fb2dfb37a758c0e8b9c2bc0fb2819fd2f7

Differential Revision: D15713609

Pulled By: ezyang

fbshipit-source-id: a3d9c49e2db985f4373508cd44e94d43ae6e24da
parent 78a376592d
commit d6af6588c2
@@ -42,7 +42,10 @@ if [[ $PARALLEL == 1 ]]; then
   args+=("3")
 fi
 
+# These exclusions are for tests that take a long time / a lot of GPU
+# memory to run; they should be passing (and you will test them if you
+# run them locally)
 pytest "${args[@]}" \
   -k \
-  'not (TestOperators and test_full_like) and not (TestOperators and test_zeros_like) and not (TestOperators and test_ones_like) and not (TestModels and test_super_resolution) and not (TestModels and test_vgg16) and not (TestModels and test_vgg16_bn) and not (TestModels and test_vgg19) and not (TestModels and test_vgg19_bn)' \
+  'not (TestOperators and test_full_like) and not (TestOperators and test_zeros_like) and not (TestOperators and test_ones_like) and not (TestModels and test_vgg16) and not (TestModels and test_vgg16_bn) and not (TestModels and test_vgg19) and not (TestModels and test_vgg19_bn)' \
   "${test_paths[@]}"
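For context on the hunk above: pytest's -k option takes a boolean expression that is matched against test identifiers, and any test matching the expression is deselected before the run starts. Below is a minimal, self-contained sketch of that deselection pattern; the test bodies are hypothetical stand-ins, not the real ONNX tests, and the only assumption is that pytest is installed.

# Minimal sketch of pytest's -k deselection; test bodies are hypothetical placeholders.
import pytest

class TestModels:
    def test_fast(self):
        assert True

    def test_vgg16(self):
        # Stand-in for a slow / memory-hungry test we want CI to deselect.
        assert True

if __name__ == "__main__":
    # Equivalent to running: pytest -k 'not (TestModels and test_vgg16)' <this file>
    # Only TestModels::test_fast is collected and run; test_vgg16 is deselected.
    pytest.main(["-k", "not (TestModels and test_vgg16)", __file__])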
@@ -69,6 +69,7 @@ class TestModels(TestCase):
         self.exportTest(toC(SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)), toC(x))
 
     @skipIfNoLapack
+    @unittest.skip("This model is broken, see https://github.com/pytorch/pytorch/issues/18429")
     def test_super_resolution(self):
         x = Variable(
             torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0)
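The hunk above skips the broken test at the source level. unittest.skip is the standard-library decorator for that: the decorated test is reported as skipped with the given reason instead of being executed, regardless of how the suite is invoked. A minimal self-contained sketch follows, using a hypothetical test body rather than the real model export test.

# Minimal sketch of unittest.skip; the test body is a hypothetical placeholder.
import unittest

class TestModels(unittest.TestCase):
    @unittest.skip("This model is broken, see https://github.com/pytorch/pytorch/issues/18429")
    def test_super_resolution(self):
        # Never executed; the runner reports the skip reason above instead.
        raise AssertionError("unreachable")

if __name__ == "__main__":
    unittest.main()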