Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69614
Previously, sparse COO tensors were ignored during freezing: `tryInsertConstant` failed for them in `freeze_module.cpp`, and hashes weren't implemented for sparse COO tensor IValues.
Test Plan: Imported from OSS
Reviewed By: mrshenli
Differential Revision: D32954620
Pulled By: davidberard98
fbshipit-source-id: a91f97fdfc2152b417f43a6948100c94970c0831
62 lines
1.7 KiB
Python
# Owner(s): ["oncall: jit"]
|
|
|
|
import io
|
|
import torch
|
|
from torch.testing._internal.jit_utils import JitTestCase
|
|
|
|
|
|
class TestSparse(JitTestCase):
|
|
def test_freeze_sparse_coo(self):
|
|
class SparseTensorModule(torch.nn.Module):
|
|
def __init__(self):
|
|
super().__init__()
|
|
self.a = torch.rand(3, 4).to_sparse()
|
|
self.b = torch.rand(3, 4).to_sparse()
|
|
|
|
def forward(self, x):
|
|
return x + self.a + self.b
|
|
|
|
x = torch.rand(3, 4).to_sparse()
|
|
|
|
m = SparseTensorModule()
|
|
unfrozen_result = m.forward(x)
|
|
|
|
m.eval()
|
|
frozen = torch.jit.freeze(torch.jit.script(m))
|
|
|
|
frozen_result = frozen.forward(x)
|
|
|
|
self.assertEqual(unfrozen_result, frozen_result)
|
|
|
|
buffer = io.BytesIO()
|
|
torch.jit.save(frozen, buffer)
|
|
buffer.seek(0)
|
|
loaded_model = torch.jit.load(buffer)
|
|
|
|
loaded_result = loaded_model.forward(x)
|
|
|
|
self.assertEqual(unfrozen_result, loaded_result)
|
|
|
|
def test_serialize_sparse_coo(self):
|
|
class SparseTensorModule(torch.nn.Module):
|
|
def __init__(self):
|
|
super().__init__()
|
|
self.a = torch.rand(3, 4).to_sparse()
|
|
self.b = torch.rand(3, 4).to_sparse()
|
|
|
|
def forward(self, x):
|
|
return x + self.a + self.b
|
|
|
|
x = torch.rand(3, 4).to_sparse()
|
|
m = SparseTensorModule()
|
|
expected_result = m.forward(x)
|
|
|
|
buffer = io.BytesIO()
|
|
torch.jit.save(torch.jit.script(m), buffer)
|
|
buffer.seek(0)
|
|
loaded_model = torch.jit.load(buffer)
|
|
|
|
loaded_result = loaded_model.forward(x)
|
|
|
|
self.assertEqual(expected_result, loaded_result)
|
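For context, the commit summary above describes freezing a scripted module whose attributes are sparse COO tensors. The snippet below is a minimal standalone sketch of that path, separate from the test file, assuming a PyTorch build that includes this change; the module name `M` and attribute `w` are illustrative only.

import torch

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # .to_sparse() produces a sparse COO tensor (layout == torch.sparse_coo).
        self.w = torch.rand(3, 4).to_sparse()

    def forward(self, x):
        return x + self.w

m = M().eval()  # freezing requires eval mode
# With this change, the sparse COO attribute can be folded into a constant
# rather than being skipped during freezing.
frozen = torch.jit.freeze(torch.jit.script(m))
x = torch.rand(3, 4).to_sparse()
# Compare dense copies of the outputs; the frozen module should match the eager one.
print(torch.allclose(m(x).to_dense(), frozen(x).to_dense()))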