pytorch/caffe2/python/operator_test/apmeter_test.py

# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np


def calculate_ap(predictions, labels):
    """Reference implementation: per-class average precision (AP)."""
    N, D = predictions.shape
    ap = np.zeros(D)
    # Ranks 1..N, used as denominators for the running precision.
    num_range = np.arange(N, dtype=np.float32) + 1
    for k in range(D):
        scores = predictions[:N, k]
        label = labels[:N, k]
        # Sort labels by descending score; mergesort keeps ties stable.
        sortind = np.argsort(-scores, kind='mergesort')
        truth = label[sortind]
        # Precision at each rank; AP averages it over the positive positions.
        precision = np.cumsum(truth) / num_range
        ap[k] = precision[truth.astype(bool)].sum() / max(1, truth.sum())
    return ap
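
# A hand-worked example of what calculate_ap computes (the numbers are
# illustrative only, not taken from the tests): for a single class with
# scores [0.9, 0.4, 0.7] and labels [1, 1, 0], sorting by descending score
# gives truth = [1, 0, 1], running precision [1/1, 1/2, 2/3], and
# AP = (1/1 + 2/3) / 2 = 5/6.
#
#     >>> preds = np.array([[0.9], [0.4], [0.7]], dtype=np.float32)
#     >>> labels = np.array([[1], [1], [0]], dtype=np.int32)
#     >>> calculate_ap(preds, labels)
#     array([0.83333333])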


class TestAPMeterOps(hu.HypothesisTestCase):
    @given(predictions=hu.arrays(dims=[10, 3],
                                 elements=st.floats(allow_nan=False,
                                                    allow_infinity=False,
                                                    min_value=0.1,
                                                    max_value=1)),
           labels=hu.arrays(dims=[10, 3],
                            dtype=np.int32,
                            elements=st.integers(min_value=0,
                                                 max_value=1)),
           **hu.gcs_cpu_only)
    def test_average_precision(self, predictions, labels, gc, dc):
        op = core.CreateOperator(
            "APMeter",
            ["predictions", "labels"],
            ["AP"],
            buffer_size=10,
        )

        def op_ref(predictions, labels):
            ap = calculate_ap(predictions, labels)
            return (ap, )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[predictions, labels],
            reference=op_ref)

    @given(predictions=hu.arrays(dims=[10, 3],
                                 elements=st.floats(allow_nan=False,
                                                    allow_infinity=False,
                                                    min_value=0.1,
                                                    max_value=1)),
           labels=hu.arrays(dims=[10, 3],
                            dtype=np.int32,
                            elements=st.integers(min_value=0,
                                                 max_value=1)),
           **hu.gcs_cpu_only)
    def test_average_precision_small_buffer(self, predictions, labels, gc, dc):
        op_small_buffer = core.CreateOperator(
            "APMeter",
            ["predictions", "labels"],
            ["AP"],
            buffer_size=5,
        )

        def op_ref(predictions, labels):
            # We can only hold the last 5 in the buffer
            ap = calculate_ap(predictions[5:], labels[5:])
            return (ap, )

        self.assertReferenceChecks(
            device_option=gc,
            op=op_small_buffer,
            inputs=[predictions, labels],
            reference=op_ref
        )
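

# A minimal, illustrative sketch of driving the APMeter operator directly
# through the caffe2 workspace, outside the Hypothesis harness. This is not
# part of the original tests; the blob names and shapes simply mirror the
# cases above.
#
#     from caffe2.python import workspace
#     predictions = np.random.rand(10, 3).astype(np.float32)
#     labels = np.random.randint(0, 2, size=(10, 3)).astype(np.int32)
#     workspace.FeedBlob("predictions", predictions)
#     workspace.FeedBlob("labels", labels)
#     workspace.RunOperatorOnce(core.CreateOperator(
#         "APMeter", ["predictions", "labels"], ["AP"], buffer_size=10))
#     print(workspace.FetchBlob("AP"))  # per-class AP, shape (3,)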