pytorch/caffe2/python/layers/reservoir_sampling.py

Commit 9c4872f4bc by Kittipat Virochsiri, 2017-08-10: Reservoir sampling with object ID deduplication

Summary: Adds the option to dedup by object ID so that more frequent objects are not present more than once in the reservoir.

Reviewed By: chocjy

Differential Revision: D5503109
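
For intuition, here is a minimal plain-Python sketch of reservoir sampling with the object-ID deduplication described above. All names below are illustrative and independent of Caffe2; the exact duplicate-handling semantics live in the C++ ReservoirSampling op and may differ in detail:

    import random

    def reservoir_sample(stream, num_to_collect, seed=0):
        """Keep a sample of up to num_to_collect items from stream,
        storing each object_id at most once (hypothetical helper)."""
        rng = random.Random(seed)
        reservoir = []      # list of (object_id, data) pairs
        object_to_pos = {}  # object_id -> index into reservoir
        num_visited = 0
        for object_id, data in stream:
            if object_id in object_to_pos:
                # Dedup: refresh the existing slot instead of adding a copy.
                reservoir[object_to_pos[object_id]] = (object_id, data)
                continue
            num_visited += 1
            if len(reservoir) < num_to_collect:
                object_to_pos[object_id] = len(reservoir)
                reservoir.append((object_id, data))
            else:
                # Classic reservoir step: keep the new item with
                # probability num_to_collect / num_visited.
                pos = rng.randrange(num_visited)
                if pos < num_to_collect:
                    evicted_id, _ = reservoir[pos]
                    del object_to_pos[evicted_id]
                    reservoir[pos] = (object_id, data)
                    object_to_pos[object_id] = pos
        return [data for _, data in reservoir]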

## @package reservoir_sampling
# Module caffe2.python.layers.reservoir_sampling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
    LayerParameter,
    ModelLayer,
)


class ReservoirSampling(ModelLayer):
"""
Collect samples from input record w/ reservoir sampling. If you have complex
data, use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""

    def __init__(self, model, input_record, num_to_collect,
                 name='reservoir_sampling', **kwargs):
        super(ReservoirSampling, self).__init__(
            model, name, input_record, **kwargs)
        assert num_to_collect > 0
        self.num_to_collect = num_to_collect

        # The reservoir holds the samples collected so far (initially empty);
        # num_visited counts how many input rows have streamed past. Both are
        # layer parameters so they persist between runs, but they are excluded
        # from optimization via model.NoOptim.
        self.reservoir = model.net.NextScopedBlob(name + "_reservoir")
        self.num_visited_blob = model.net.NextScopedBlob(
            name + "_num_visited")
        self.params.append(LayerParameter(
            parameter=self.reservoir,
            initializer=core.CreateOperator(
                'ConstantFill', [], self.reservoir, shape=[0]
            ),
            optimizer=model.NoOptim,
        ))
        self.params.append(LayerParameter(
            parameter=self.num_visited_blob,
            initializer=core.CreateOperator(
                'ConstantFill',
                [],
                self.num_visited_blob,
                shape=[],
                value=0,
                dtype=core.DataType.INT64,
            ),
            optimizer=model.NoOptim,
        ))

        self.extra_input_blobs = []
        self.extra_output_blobs = []
        if 'object_id' in input_record:
            # Optional dedup by object ID: keep a map from object ID to its
            # position in the reservoir (and the inverse mapping) so that an
            # object already present is not stored a second time.
            self.extra_input_blobs.append(input_record.object_id())
            object_to_pos = model.net.NextScopedBlob(name + "_object_to_pos")
            pos_to_object = model.net.NextScopedBlob(name + "_pos_to_object")
            self.extra_input_blobs.extend([object_to_pos, pos_to_object])
            self.extra_output_blobs.extend([object_to_pos, pos_to_object])
            self.params.append(LayerParameter(
                parameter=object_to_pos,
                initializer=core.CreateOperator(
                    'CreateMap', [], object_to_pos,
                    key_dtype=core.DataType.INT64,
                    valued_dtype=core.DataType.INT32,
                ),
                optimizer=model.NoOptim,
            ))
            self.params.append(LayerParameter(
                parameter=pos_to_object,
                initializer=core.CreateOperator(
                    'ConstantFill',
                    [],
                    pos_to_object,
                    shape=[0],
                    value=0,
                    dtype=core.DataType.INT64,
                ),
                optimizer=model.NoOptim,
            ))

        self.output_schema = schema.from_blob_list(
            input_record.data, [model.net.NextScopedBlob(name + "_output")])

    def add_ops(self, net):
        # The ReservoirSampling op updates the reservoir, the visit counter,
        # and (when present) the dedup maps in place: they appear as both
        # inputs and outputs.
        net.ReservoirSampling(
            [self.reservoir, self.num_visited_blob, self.input_record.data()]
            + self.extra_input_blobs,
            [self.reservoir, self.num_visited_blob] + self.extra_output_blobs,
            num_to_collect=self.num_to_collect,
        )
        # Copy to make sure the DAG of the record is not broken.
        # Also, the output of this is likely going through a pipeline, which
        # will move data and require us to copy anyway.
        net.Copy(self.reservoir, self.output_schema())
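
A hedged sketch of inspecting the collected sample (the blob name and run count here are illustrative; the real output blob name is produced by NextScopedBlob):

    from caffe2.python import workspace

    # After running the net that contains this layer a number of times, the
    # output blob holds at most num_to_collect rows sampled from everything
    # seen so far (each object ID at most once when dedup is enabled).
    rows = workspace.FetchBlob("reservoir_sampling_output")
    assert len(rows) <= 100  # never more than num_to_collect rows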