Reverts force_gpu_half changes from #3660 (#5000)

The test_cuda.py setup purports to test half tensors, but actually just
re-tests FloatTensors because the keys in type_map were str instead of
type. Testing HalfTensors is more complicated, requiring changes to
precision and the exclusion of some unimplemented methods.

We should fully test half CUDA tensors. This change just deletes the
duplicate tests of FloatTensor.
This commit is contained in:
Sam Gross 2018-02-07 15:33:17 -05:00 committed by GitHub
parent 3e85613751
commit 85e22b5475
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -406,18 +406,12 @@ def get_cycles_per_ms():
return _cycles_per_ms
def compare_cpu_gpu(tensor_constructor, arg_constructor, fn, t, precision=1e-5, force_gpu_half=False):
def compare_cpu_gpu(tensor_constructor, arg_constructor, fn, t, precision=1e-5):
def tmp(self):
cpu_tensor = tensor_constructor(t)
type_map = {}
if force_gpu_half:
type_map = {
'torch.FloatTensor': 'torch.cuda.HalfTensor',
'torch.DoubleTensor': 'torch.cuda.HalfTensor',
}
gpu_tensor = to_gpu(cpu_tensor, type_map)
gpu_tensor = to_gpu(cpu_tensor)
cpu_args = arg_constructor(t)
gpu_args = [to_gpu(arg, type_map) for arg in cpu_args]
gpu_args = [to_gpu(arg) for arg in cpu_args]
cpu_result = getattr(cpu_tensor, fn)(*cpu_args)
try:
gpu_result = getattr(gpu_tensor, fn)(*gpu_args)
@ -1407,12 +1401,6 @@ if HAS_CUDA:
setattr(TestCuda,
test_name,
compare_cpu_gpu(constr, arg_constr, name_inner, t, precision))
if t == torch.FloatTensor and not IS_WINDOWS: # CUDA HalfTensor currently doesn't work on Windows
assert not hasattr(TestCuda, test_name + '_gpu_half'), "Duplicated test name: " + test_name
setattr(TestCuda,
test_name + '_gpu_half',
compare_cpu_gpu(constr, arg_constr, name_inner, t,
precision, force_gpu_half=True))
if __name__ == '__main__':