Revert D22330340: [C2] Fixed a bug in normalization operator
Test Plan: revert-hammer
Differential Revision: D22330340 (ce63f70981)
Original commit changeset: 0bccf925bb76
fbshipit-source-id: e27d70dee0fbe9e708b0cf3be81dbd33c4015026
This commit is contained in:
parent 9cc73966b3
commit a1c234e372
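For context on what is being reverted: the change in D22330340 guarded the computation of n (the number of slices to normalize) against empty inputs before calling DoNormalize, and added Hypothesis tests that feed np.array([]) through the Normalize and NormalizeL1 operators. Below is a minimal standalone sketch of the guarded and unguarded variants; the helper names rows_guarded and rows_unguarded are hypothetical, and the claim that DoNormalize does no work when n == 0 is an assumption, since its body is not part of this diff.

#include <cassert>
#include <cstdint>

// Guarded variant (the code this commit reverts): an empty tensor has
// numel == 0, so n is forced to 0 without dividing. This also covers an
// empty canonical axis, because m == 0 implies numel == 0, and
// numel / m would otherwise evaluate 0 / 0 (undefined behavior).
int64_t rows_guarded(int64_t numel, int64_t m) {
  return numel == 0 ? 0 : numel / m;
}

// Unguarded variant (restored by this revert): correct whenever m != 0,
// but undefined for an input like np.array([]), where m == 0.
int64_t rows_unguarded(int64_t numel, int64_t m) {
  assert(m != 0 && "0 / 0 for an empty canonical axis");
  return numel / m;
}

Accordingly, the diff below restores the unguarded division in all three operators and deletes the two empty-input tests.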
@@ -23,7 +23,7 @@ class NormalizeL1Op final : public Operator<Context> {
     const auto canonical_axis = x.canonical_axis_index(
         this->template GetSingleArgument<int>("axis", -1));
     const int m = x.dim32(canonical_axis);
-    const int n = x.numel() == 0 ? 0 : x.numel() / m;
+    const int n = x.numel() / m;
     const int sf = x.size_from_dim(canonical_axis + 1);
     DoNormalize(xData, yData, m, n, sf);
     return true;
@@ -28,7 +28,7 @@ class NormalizeOp final : public Operator<Context> {
     const auto canonical_axis = x.canonical_axis_index(
         this->template GetSingleArgument<int>("axis", -1));
     const int64_t m = x.dim(canonical_axis);
-    const size_t n = x.numel() == 0 ? 0 : x.numel() / m;
+    const size_t n = x.numel() / m;
     const size_t sf = x.size_from_dim(canonical_axis + 1);
     DoNormalize(xData, yData, m, n, sf);
     return true;
@@ -80,7 +80,7 @@ class NormalizeGradientOp final : public Operator<Context> {
     const auto canonical_axis = x.canonical_axis_index(
         this->template GetSingleArgument<int>("axis", -1));
     const int m = x.dim32(canonical_axis);
-    const int n = x.numel() == 0 ? 0 : x.numel() / m;
+    const int n = x.numel() / m;
     const int sf = x.size_from_dim(canonical_axis + 1);
     DoNormalize(xData, gOutData, gInData, m, n, sf);
     return true;
@@ -35,20 +35,6 @@ class TestNormalizeOp(hu.HypothesisTestCase):
         self.assertDeviceChecks(dc, op, [x], [0])
         self.assertGradientChecks(gc, op, [x], 0, [0])
-
-    @given(**hu.gcs)
-    def test_normalize_empty(self, gc, dc):
-        def ref_normalize(_):
-            return (np.array([]).astype(np.float32),)
-
-        x = np.array([]).astype(np.float32)
-        op = core.CreateOperator("Normalize", "X", "Y")
-        self.assertReferenceChecks(
-            gc, op, [x], ref_normalize
-        )
-        self.assertDeviceChecks(dc, op, [x], [0])
-        self.assertGradientChecks(gc, op, [x], 0, [0])
-

     @given(
         X=hu.tensor(
             min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)
@@ -65,15 +51,3 @@ class TestNormalizeOp(hu.HypothesisTestCase):
         op = core.CreateOperator("NormalizeL1", "X", "Y", axis=axis)
         self.assertReferenceChecks(gc, op, [X], functools.partial(ref, axis=axis))
         self.assertDeviceChecks(dc, op, [X], [0])
-
-    @given(**hu.gcs)
-    def test_normalize_L1_empty(self, gc, dc):
-        def ref_normalize(_):
-            return (np.array([]).astype(np.float32),)
-
-        x = np.array([]).astype(np.float32)
-        op = core.CreateOperator("NormalizeL1", "X", "Y")
-        self.assertReferenceChecks(
-            gc, op, [x], ref_normalize
-        )
-        self.assertDeviceChecks(dc, op, [x], [0])