add tests for add_custom_scalars and others (#20987)

Summary:
Originally, the tests for the tensorboard writer were smoke tests only. This PR lets CI compare the output with expected results at a low level. The randomness of the tensors in the tests is also removed.
P.S. I found that how protobuf serializes data differs between Python environments. One way to solve this is to write the data and then read it back immediately (i.e., compare the data at a higher level).

For `add_custom_scalars`, the data to be written is a dictionary, and the serialized result might differ (it is not an `OrderedDict`). So only a smoke test is used for that.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/20987

Reviewed By: NarineK, lanpa

Differential Revision: D15804871

Pulled By: orionr

fbshipit-source-id: 69324c11ff823b19960d50def73adff36eb4a2ac
This commit is contained in:
Tzu-Wei Huang 2019-06-14 12:16:50 -07:00 committed by Facebook Github Bot
parent 0d6eb209e6
commit 1fc240e59a
14 changed files with 259 additions and 28 deletions

View File

@ -0,0 +1,10 @@
value {
tag: "dummy"
audio {
sample_rate: 44100.0
num_channels: 1
length_frames: 42
encoded_audio_string: "RIFFx\000\000\000WAVEfmt \020\000\000\000\001\000\001\000D\254\000\000\210X\001\000\002\000\020\000dataT\000\000\000\000\000\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177\377\177"
content_type: "audio/wav"
}
}

View File

@ -0,0 +1,23 @@
value {
tag: "dummy"
histo {
max: 1023.0
num: 1024.0
sum: 523776.0
sum_squares: 357389824.0
bucket_limit: 0.0
bucket_limit: 186.0
bucket_limit: 372.0
bucket_limit: 558.0
bucket_limit: 744.0
bucket_limit: 930.0
bucket_limit: 1023.0
bucket: 0.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 94.0
}
}

View File

@ -0,0 +1,23 @@
value {
tag: "dummy"
histo {
max: 1023.0
num: 1024.0
sum: 523776.0
sum_squares: 357389824.0
bucket_limit: 0.0
bucket_limit: 186.0
bucket_limit: 372.0
bucket_limit: 558.0
bucket_limit: 744.0
bucket_limit: 930.0
bucket_limit: 1023.0
bucket: 0.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 94.0
}
}

View File

@ -0,0 +1,23 @@
value {
tag: "dummy"
histo {
max: 1023.0
num: 1024.0
sum: 523776.0
sum_squares: 357389824.0
bucket_limit: 0.0
bucket_limit: 186.0
bucket_limit: 372.0
bucket_limit: 558.0
bucket_limit: 744.0
bucket_limit: 930.0
bucket_limit: 1023.0
bucket: 0.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 186.0
bucket: 94.0
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 8
width: 16
colorspace: 3
encoded_image_string: "\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000\020\000\000\000\010\010\002\000\000\000\177\024\350\300\000\000\000+IDATx\234cd8\320\360\037\033pww\307*\316\362\343\307\217\037\330$~\374\370\361\037\233\004\013\016\365\377q\211\217H\r\000d\305y\224,\220Z\033\000\000\000\000IEND\256B`\202"
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 32
width: 32
colorspace: 3
encoded_image_string: "\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000 \000\000\000 \010\002\000\000\000\374\030\355\243\000\000\000sIDATx\234\355\323=\n\300 \014\005\340\027p\250\267p\324\373\332\373\345\020vn\007\367>0\204b\311\233\305/\344G\000\334\236\021Uu\005R\000\377\007\244\224\342\013||\007\2655\330BfP\215\337S`>:{_l\020\335\242\tX6-\000\032r\007G\316\000\2561\226\201\244\252/\005V\357\026\271\003\033\0149\000\232\270\003+\260\301\220\003\240y\000T\221\324V\250_v\320\000\000\000\000IEND\256B`\202"
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 8
width: 8
colorspace: 3
encoded_image_string: "\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000\010\000\000\000\010\010\002\000\000\000Km)\334\000\000\000\031IDATx\234cd``\370\217\r0\376\370\361\003\253\004\313\240\224\000\000;\267\273\313%\020=\255\000\000\000\000IEND\256B`\202"
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 8
width: 16
colorspace: 3
encoded_image_string: "\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000\020\000\000\000\010\010\002\000\000\000\177\024\350\300\000\000\000(IDATx\234cd``\370\217\r\034?~\034\2538\313\217\037?~\374\370\201)\201U\020\252\001\253\304\250\006$\000\000\230\346y\315\204l;t\000\000\000\000IEND\256B`\202"
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 8
width: 8
colorspace: 3
encoded_image_string: "\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000\010\000\000\000\010\010\002\000\000\000Km)\334\000\000\000\031IDATx\234cd``\370\217\r0\376\370\361\003\253\004\313\240\224\000\000;\267\273\313%\020=\255\000\000\000\000IEND\256B`\202"
}
}

View File

@ -0,0 +1,26 @@
value {
tag: "tag"
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 6
}
dim {
size: 1
}
}
float_val: 57.0
float_val: 43.0
float_val: 0.0
float_val: 0.0
float_val: 0.57
float_val: 1.0
}
metadata {
plugin_data {
plugin_name: "pr_curves"
content: "\020\001"
}
}
}

View File

@ -0,0 +1,50 @@
value {
tag: "prcurve with raw data"
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 6
}
dim {
size: 5
}
}
float_val: 75.0
float_val: 64.0
float_val: 21.0
float_val: 5.0
float_val: 0.0
float_val: 150.0
float_val: 105.0
float_val: 18.0
float_val: 0.0
float_val: 0.0
float_val: 0.0
float_val: 45.0
float_val: 132.0
float_val: 150.0
float_val: 150.0
float_val: 0.0
float_val: 11.0
float_val: 54.0
float_val: 70.0
float_val: 75.0
float_val: 0.3333333
float_val: 0.3786982
float_val: 0.5384616
float_val: 1.0
float_val: 0.0
float_val: 1.0
float_val: 0.8533334
float_val: 0.28
float_val: 0.0666667
float_val: 0.0
}
metadata {
plugin_data {
plugin_name: "pr_curves"
content: "\020\001"
}
}
}

View File

@ -0,0 +1,17 @@
value {
tag: "dummy/text_summary"
tensor {
dtype: DT_STRING
tensor_shape {
dim {
size: 1
}
}
string_val: "text 123"
}
metadata {
plugin_data {
plugin_name: "text"
}
}
}

View File

@ -0,0 +1,9 @@
value {
tag: "dummy"
image {
height: 16
width: 16
colorspace: 1
encoded_image_string: "GIF89a\020\000\020\000\207\000\000\377\377\377\376\376\376\375\375\375\374\374\374\373\373\373\372\372\372\371\371\371\370\370\370\367\367\367\366\366\366\365\365\365\364\364\364\363\363\363\362\362\362\361\361\361\360\360\360\357\357\357\356\356\356\355\355\355\354\354\354\353\353\353\352\352\352\351\351\351\350\350\350\347\347\347\346\346\346\345\345\345\344\344\344\343\343\343\342\342\342\341\341\341\340\340\340\337\337\337\336\336\336\335\335\335\334\334\334\333\333\333\332\332\332\331\331\331\330\330\330\327\327\327\326\326\326\325\325\325\324\324\324\323\323\323\322\322\322\321\321\321\320\320\320\317\317\317\316\316\316\315\315\315\314\314\314\313\313\313\312\312\312\311\311\311\310\310\310\307\307\307\306\306\306\305\305\305\304\304\304\303\303\303\302\302\302\301\301\301\300\300\300\277\277\277\276\276\276\275\275\275\274\274\274\273\273\273\272\272\272\271\271\271\270\270\270\267\267\267\266\266\266\265\265\265\264\264\264\263\263\263\262\262\262\261\261\261\260\260\260\257\257\257\256\256\256\255\255\255\254\254\254\253\253\253\252\252\252\251\251\251\250\250\250\247\247\247\246\246\246\245\245\245\244\244\244\243\243\243\242\242\242\241\241\241\240\240\240\237\237\237\236\236\236\235\235\235\234\234\234\233\233\233\232\232\232\231\231\231\230\230\230\227\227\227\226\226\226\225\225\225\224\224\224\223\223\223\222\222\222\221\221\221\220\220\220\217\217\217\216\216\216\215\215\215\214\214\214\213\213\213\212\212\212\211\211\211\210\210\210\207\207\207\206\206\206\205\205\205\204\204\204\203\203\203\202\202\202\201\201\201\200\200\200\177\177\177~~~}}}|||{{{zzzyyyxxxwwwvvvuuutttsssrrrqqqpppooonnnmmmlllkkkjjjiiihhhgggfffeeedddcccbbbaaa```___^^^]]]\\\\\\[[[ZZZYYYXXXWWWVVVUUUTTTSSSRRRQQQPPPOOONNNMMMLLLKKKJJJIIIHHHGGGFFFEEEDDDCCCBBBAAA@@@???>>>===<<<;;;:::999888777666555444333222111000///...---,,,+++***)))(((\'\'\'&&&%%%$$$###\"\"\"!!! 
\037\037\037\036\036\036\035\035\035\034\034\034\033\033\033\032\032\032\031\031\031\030\030\030\027\027\027\026\026\026\025\025\025\024\024\024\023\023\023\022\022\022\021\021\021\020\020\020\017\017\017\016\016\016\r\r\r\014\014\014\013\013\013\n\n\n\t\t\t\010\010\010\007\007\007\006\006\006\005\005\005\004\004\004\003\003\003\002\002\002\001\001\001\000\000\000!\377\013NETSCAPE2.0\003\001\377\377\000!\371\004\010\031\000\000\000,\000\000\000\000\020\000\020\000\000\010\377\000\377\001\010 `\000\201\002\006~\001\013&l\030\261b\306\016 H\240`\001\203\006\016\216!K\246l\031\263f\316\036@\210 a\002\205\n\026\236A\213&m\032\265j\326.`\310\240a\003\207\016\036\256a\313\246m\033\267n\336>\200\010!b\004\211\022&\276\201\013\'n\034\271r\346N\240H\241b\005\213\026.\316\241K\247n\035\273v\356^\300\210!c\006\215\0326\336\301\213\'o\036\275z\366n\340\310\241c\007\217\036>\356\341\313\247o\037\277~\376\376\000\n$h\020\241B\206~\000\t\"d\010\221\"F\016!J\244h\021\243F\216\216 I\242d\t\223&N\036A\212$i\022\245J\226\236@\211\"e\n\225*V.a\312\244i\023\247N\236\256`\311\242e\013\227.^>\201\n%j\024\251R\246\276\200\t#f\014\2312fN\241J\245j\025\253V\256\316\240I\243f\r\2336n^\301%\212%k\026\255Z\266\336\300\211#g\016\235:vn\341\312\245k\027\257^\276\356\340\311\243g\017\037\200}\374\004\004\000!\371\004\010\031\000\000\000,\000\000\000\000\020\000\020\000\000\010\377\000\177\000\t\"d\010\221\"F\376\001\010 `\000\201\002\006\216 I\242d\t\223&N\016 H\240`\001\203\006\016\236@\211\"e\n\225*V\036@\210 
a\002\205\n\026\256`\311\242e\013\227.^.`\310\240a\003\207\016\036\276\200\t#f\014\2312f>\200\010!b\004\211\022&\316\240I\243f\r\2336nN\240H\241b\005\213\026.\336\300\211#g\016\235:v^\300\210!c\006\215\0326\356\340\311\243g\017\237>~n\340\310\241c\007\217\036>~\001\013&l\030\261b\306\376\000\n$h\020\241B\206\216!K\246l\031\263f\316\016!J\244h\021\243F\216\236A\213&m\032\265j\326\036A\212$i\022\245J\226\256a\313\246m\033\267n\336.a\312\244i\023\247N\236\276\201\013\'n\034\271r\346>\201\n%j\024\251R\246\316\241K\247n\035\273v\356N\241J\245j\025\253V\256\336\301%\213\'o\036\275z\366^\301\212%k\026\255Z\266\356\341\313\247o\037\277~\376n\341\312\245k\027/\200\275|\005\004\000!\371\004\010\031\000\000\000,\000\000\000\000\020\000\020\000\000\010\377\000\377\000\n$h\020\241B\206~\000\t\"d\010\221\"F\016!J\244h\021\243F\216\216 I\242d\t\223&N\036A\212$i\022\245J\226\236@\211\"e\n\225*V.a\312\244i\023\247N\236\256`\311\242e\013\227.^>\201\n%j\024\251R\246\276\200\t#f\014\2312fN\241J\245j\025\253V\256\316\240I\243f\r\2336n^\301\212%k\026\255Z\266\336\300\211#g\016\235:vn\341\312\245k\027\257^\276\356\340\311\243g\017\237>~\376\001\010 `\000\201\002\006~\001\013&l\030\261b\306\016 H\240`\001\203\006\016\216!K\246l\031\263f\316\036@\210 a\002\205\n\026\236A\213&m\032\265j\326.`\310\240a\003\207\016\036\256a\313\246m\033\267n\336>\200\010!b\004\211\022&\276\201\013\'n\034\271r\346N\240H\241b\005\213\026.\316\241K\247n\035\273v\356^\300%\210!c\006\215\0326\336\301\213\'o\036\275z\366n\340\310\241c\007\217\036>\356\341\313\247o\037?\200\375\374\005\004\000;"
}
}

View File

@ -42,6 +42,10 @@ skipIfNoMatplotlib = unittest.skipIf(not TEST_MATPLOTLIB, "no matplotlib")
import torch
from common_utils import TestCase, run_tests
def tensor_N(shape, dtype=float):
    """Return a deterministic test tensor of the given shape.

    The values are 0, 1, ..., numel-1 laid out in row-major order, so
    tests that compare serialized output against checked-in expect files
    are fully reproducible (no randomness).
    """
    total = np.prod(shape)
    return np.arange(total, dtype=dtype).reshape(shape)
class BaseTestCase(TestCase):
""" Base class used for all TensorBoard tests """
@ -315,31 +319,31 @@ if TEST_TENSORBOARD:
def test_image_with_boxes(self):
self.assertTrue(compare_proto(summary.image_boxes('dummy',
np.random.rand(3, 32, 32).astype(np.float32),
tensor_N(shape=(3, 32, 32)),
np.array([[10, 10, 40, 40]])),
self))
def test_image_with_one_channel(self):
self.assertTrue(compare_proto(summary.image('dummy',
np.random.rand(1, 8, 8).astype(np.float32),
tensor_N(shape=(1, 8, 8)),
dataformats='CHW'),
self)) # noqa E127
def test_image_with_one_channel_batched(self):
self.assertTrue(compare_proto(summary.image('dummy',
np.random.rand(2, 1, 8, 8).astype(np.float32),
tensor_N(shape=(2, 1, 8, 8)),
dataformats='NCHW'),
self)) # noqa E127
def test_image_with_3_channel_batched(self):
self.assertTrue(compare_proto(summary.image('dummy',
np.random.rand(2, 3, 8, 8).astype(np.float32),
tensor_N(shape=(2, 3, 8, 8)),
dataformats='NCHW'),
self)) # noqa E127
def test_image_without_channel(self):
self.assertTrue(compare_proto(summary.image('dummy',
np.random.rand(8, 8).astype(np.float32),
tensor_N(shape=(8, 8)),
dataformats='HW'),
self)) # noqa E127
@ -348,56 +352,57 @@ if TEST_TENSORBOARD:
import moviepy # noqa F401
except ImportError:
return
self.assertTrue(compare_proto(summary.video('dummy', np.random.rand(4, 3, 1, 8, 8).astype(np.float32)), self))
summary.video('dummy', np.random.rand(16, 48, 1, 28, 28).astype(np.float32))
summary.video('dummy', np.random.rand(20, 7, 1, 8, 8).astype(np.float32))
self.assertTrue(compare_proto(summary.video('dummy', tensor_N(shape=(4, 3, 1, 8, 8))), self))
summary.video('dummy', np.random.rand(16, 48, 1, 28, 28))
summary.video('dummy', np.random.rand(20, 7, 1, 8, 8))
def test_audio(self):
self.assertTrue(compare_proto(summary.audio('dummy', np.random.rand(42)), self))
self.assertTrue(compare_proto(summary.audio('dummy', tensor_N(shape=(42,))), self))
def test_text(self):
self.assertTrue(compare_proto(summary.text('dummy', 'text 123'), self))
def test_histogram_auto(self):
self.assertTrue(compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='auto', max_bins=5), self))
self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='auto', max_bins=5), self))
def test_histogram_fd(self):
self.assertTrue(compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='fd', max_bins=5), self))
self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='fd', max_bins=5), self))
def test_histogram_doane(self):
self.assertTrue(compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='doane', max_bins=5), self))
self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='doane', max_bins=5), self))
def test_custom_scalars(self):
layout = {'Taiwan': {'twse': ['Multiline', ['twse/0050', 'twse/2330']]},
'USA': {'dow': ['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
'nasdaq': ['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}}
summary.custom_scalars(layout) # only smoke test. Because protobuf in python2/3 serialize dictionary differently.
def remove_whitespace(string):
    """Strip spaces, tabs, and newlines from *string*.

    Used so that protobuf text-format comparisons ignore formatting
    differences between serializers.
    """
    for ch in (' ', '\t', '\n'):
        string = string.replace(ch, '')
    return string
def compare_proto(str_to_compare, function_ptr):
# TODO: enable test after tensorboard is ready.
return True
if 'histogram' in function_ptr.id():
return # numpy.histogram has slight difference between versions
if 'pr_curve' in function_ptr.id():
return # pr_curve depends on numpy.histogram
module_id = function_ptr.__class__.__module__
test_dir = os.path.dirname(sys.modules[module_id].__file__)
functionName = function_ptr.id().split('.')[-1]
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
expected_file = os.path.join(test_dir,
"expect",
module_id.split('.')[-1] + '.' + functionName + ".expect")
'TestTensorBoard.' + functionName + ".expect")
assert os.path.exists(expected_file)
with open(expected_file) as f:
expected = f.read()
str_to_compare = str(str_to_compare)
# if not remove_whitespace(str_to_compare) == remove_whitespace(expected):
return remove_whitespace(str_to_compare) == remove_whitespace(expected)
def write_proto(str_to_compare, function_ptr):
module_id = function_ptr.__class__.__module__
test_dir = os.path.dirname(sys.modules[module_id].__file__)
functionName = function_ptr.id().split('.')[-1]
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
expected_file = os.path.join(test_dir,
"expect",
module_id.split('.')[-1] + '.' + functionName + ".expect")
'TestTensorBoard.' + functionName + ".expect")
with open(expected_file, 'w') as f:
f.write(str(str_to_compare))
@ -414,7 +419,7 @@ if TEST_TENSORBOARD:
return self.l(x)
with SummaryWriter(comment='LinearModel') as w:
w.add_graph(myLinear(), dummy_input, True)
w.add_graph(myLinear(), dummy_input)
def test_mlp_graph(self):
dummy_input = (torch.zeros(2, 1, 28, 28),)
@ -442,7 +447,7 @@ if TEST_TENSORBOARD:
return h
with SummaryWriter(comment='MLPModel') as w:
w.add_graph(myMLP(), dummy_input, True)
w.add_graph(myMLP(), dummy_input)
def test_wrong_input_size(self):
with self.assertRaises(RuntimeError) as e_info:
@ -527,7 +532,7 @@ if TEST_TENSORBOARD:
@skipIfNoCaffe2
def test_caffe2_np(self):
workspace.FeedBlob("testBlob", np.random.randn(1, 3, 64, 64).astype(np.float32))
workspace.FeedBlob("testBlob", tensor_N(shape=(1, 3, 64, 64)))
self.assertIsInstance(make_np('testBlob'), np.ndarray)
@skipIfNoCaffe2