Fix clip gradient with empty input (#14709)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14709

Relax the size checks in the CPU and CUDA Clip/ClipGradient operators from CAFFE_ENFORCE_GT(..., 0) to CAFFE_ENFORCE_GE(..., 0), so an empty input produces an empty output/gradient instead of an error, and extend the hypothesis tests to generate such inputs via hu.tensor(min_dim=0).

Reviewed By: Wakeupbuddy

Differential Revision: D13305554

fbshipit-source-id: 380062d4b0e4f9dc0207a27766cac7b8d05384d5
Author:    Huan Gui
Date:      2018-12-05 22:51:23 -08:00
Committer: Facebook Github Bot
Parent:    997df9a6ec
Commit:    ba287eebca

3 changed files with 14 additions and 10 deletions
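For context, the failure this fixes can be reproduced from the Caffe2 Python front end. The sketch below is illustrative only (the blob names and min/max values are mine, not from the PR): before this change, ClipGradient rejected empty tensors via its size check.

import numpy as np
from caffe2.python import core, workspace

# Empty clipped output Y and empty upstream gradient dY.
workspace.FeedBlob("Y", np.array([], dtype=np.float32))
workspace.FeedBlob("dY", np.array([], dtype=np.float32))

op = core.CreateOperator("ClipGradient", ["Y", "dY"], ["dX"], min=-1.0, max=1.0)
workspace.RunOperatorOnce(op)  # failed the numel() > 0 enforce before this change

print(workspace.FetchBlob("dX").shape)  # (0,)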

File: caffe2/operators/clip_op.cc (path inferred from the hunk's CPUContext ClipGradientOp)

@@ -20,7 +20,7 @@ bool ClipGradientOp<float, CPUContext>::RunOnDevice() {
   auto& Y = Input(0);
   auto& dY = Input(1);
   auto* dX = Output(0);
-  CAFFE_ENFORCE_GT(Y.numel(), 0);
+  CAFFE_ENFORCE_GE(Y.numel(), 0);
   CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
   dX->ResizeLike(Y);
   const float* Ydata = Y.data<float>();
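The GT -> GE relaxation is safe here because the element-wise gradient loop is trivially a no-op over zero elements. In numpy terms, ClipGradient computes roughly the following (a sketch of the semantics, not the operator's actual code):

import numpy as np

def clip_gradient_ref(Y, dY, min_, max_):
    # The gradient passes through only where the clipped output stayed
    # strictly inside (min_, max_); it is zero at the clamped ends.
    return (dY * ((Y > min_) & (Y < max_))).astype(dY.dtype)

# Degenerates gracefully: empty Y and dY yield an empty dX.
empty = np.array([], dtype=np.float32)
print(clip_gradient_ref(empty, empty, -1.0, 1.0).shape)  # (0,)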

File: caffe2/operators/clip_op.cu (path inferred from the hunks' CUDAContext operators)

@@ -44,7 +44,7 @@ template <>
 bool ClipOp<float, CUDAContext>::RunOnDevice() {
   auto& X = Input(0);
   auto* Y = Output(0);
-  CAFFE_ENFORCE_GT(X.size(), 0);
+  CAFFE_ENFORCE_GE(X.size(), 0);
   Y->ResizeLike(X);
   ClipKernel<<<
       CAFFE_GET_BLOCKS(X.size()),
@@ -60,7 +60,7 @@ bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
   auto& Y = Input(0);
   auto& dY = Input(1);
   auto* dX = Output(0);
-  CAFFE_ENFORCE_GT(Y.size(), 0);
+  CAFFE_ENFORCE_GE(Y.size(), 0);
   CAFFE_ENFORCE_EQ(dY.size(), Y.size());
   dX->ResizeLike(Y);
   ClipGradientKernel<<<
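The same relaxation should be safe for the CUDA kernels: Caffe2's CAFFE_GET_BLOCKS clamps the launch to a minimum of one block, and the kernel's index guard then performs zero iterations when size() is 0. A sketch of the now-accepted call through the Python front end (device setup is illustrative and assumes a CUDA build):

import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

device = core.DeviceOption(caffe2_pb2.CUDA, 0)
workspace.FeedBlob("X", np.array([], dtype=np.float32), device_option=device)
workspace.RunOperatorOnce(core.CreateOperator(
    "Clip", ["X"], ["Y"], min=-1.0, max=1.0, device_option=device))
print(workspace.FetchBlob("Y").shape)  # (0,)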

File: caffe2/python/operator_test/clip_op_test.py (path inferred from the test imports)

@@ -14,16 +14,18 @@ import caffe2.python.serialized_test.serialized_test_util as serial
 class TestClip(serial.SerializedTestCase):
-    @serial.given(X=hu.tensor(),
+    @serial.given(X=hu.tensor(min_dim=0),
                   min_=st.floats(min_value=-2, max_value=0),
                   max_=st.floats(min_value=0, max_value=2),
                   inplace=st.booleans(),
                   **hu.gcs)
     def test_clip(self, X, min_, max_, inplace, gc, dc):
         # go away from the origin point to avoid kink problems
-        X[np.abs(X - min_) < 0.05] += 0.1
-        X[np.abs(X - max_) < 0.05] += 0.1
+        if np.isscalar(X):
+            X = np.array([], dtype=np.float32)
+        else:
+            X[np.abs(X - min_) < 0.05] += 0.1
+            X[np.abs(X - max_) < 0.05] += 0.1

         def clip_ref(X):
             X = X.clip(min_, max_)
@@ -40,13 +42,15 @@ class TestClip(serial.SerializedTestCase):
         # Gradient check wrt X
         self.assertGradientChecks(gc, op, [X], 0, [0])

-    @given(X=hu.tensor(),
+    @given(X=hu.tensor(min_dim=0),
            inplace=st.booleans(),
            **hu.gcs)
     def test_clip_default(self, X, inplace, gc, dc):
         # go away from the origin point to avoid kink problems
-        X += 0.04 * np.sign(X)
+        if np.isscalar(X):
+            X = np.array([], dtype=np.float32)
+        else:
+            X += 0.04 * np.sign(X)

         def clip_ref(X):
             return (X,)
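Two details make the test change work. With min_dim=0, hu.tensor can draw a zero-dimensional value, which (depending on the hypothesis version) arrives as a scalar rather than an indexable array, and masked indexing such as X[np.abs(X - min_) < 0.05] requires at least one axis. The np.isscalar branch therefore substitutes an explicitly empty tensor, which is precisely the input the relaxed CAFFE_ENFORCE_GE checks must now accept. A standalone sketch of that normalization:

import numpy as np

X = np.float32(0.25)  # stands in for a zero-dim draw from hu.tensor(min_dim=0)
if np.isscalar(X):
    # Masked indexing needs at least one axis, so the test swaps in an
    # explicitly empty tensor instead of perturbing the draw.
    X = np.array([], dtype=np.float32)
print(X.shape, X.size)  # (0,) 0 -- the empty input the ops must now accept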