Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 00:21:07 +01:00)
* [fix] Re-enable events in RNN ops. We had earlier disabled events in RNN ops because, back then, we didn't use events; with current use cases this is no longer true (https://fburl.com/8vd0lp8y).
* Use ops with CUDA impl.
* Revert D7729695: [caffe2][fix] Re-enable events in RNN ops. This reverts commit 4b215c7496fb724656ff4c776933a15bdbbcde5e. @bypass-lint An infra SEV is better than not reverting this diff. If you copy this password, see you in SEV Review! @cause_a_sev_many_files
* [observer] Clean up observer_config.h. #accept2ship
* [1/n] Refactor dataio_test.py: replace code duplication with a common function.
* Add barrier net that runs before training nets. Add a synchronize barrier net that is run before the training nets; with this net, shards that are faster will wait for the other shards before starting training. This reduces the chance of faster shards timing out during Gloo AllReduce. Removed the explicit data_parallel_model.py synchronize call in the holmes workflow. A similar change in the speech/asr_training workflow will come in another diff.
* Support the dnnlowp backend in caffe2_benchmark. This is for SHARE operator latency evaluation.
* Migrate integral_image_op to main caffe2: migrate integral_image_op (GPU version) given by https://fburl.com/yvqezigi to caffe2/caffe2/operators and implement its CPU version. Write a test using the hypothesis_test mechanism.
* [pos_disc, fbcode] Implement unjoined lr loss. As explained in https://our.intern.facebook.com/intern/wiki/Model_Based_Calibration/, when the dataset is a joined data set where labels might change later, we need to use the unjoined logloss. The implementation is almost the same as in Sigrid (https://fburl.com/1trngsls). With y the true label, x the dot product, and p = logistic(x),
      loss = y * (log(p) - log(1 - p)) + (1 - y) * log(1 - p)
           = x * y - (1 - y) * x - (1 - y) * log(1 + exp(-x))
  For x < 0, to ensure stability and avoid overflow, we reformulate the exp term as
      loss = x * y - (1 - y) * x + (1 - y) * x - (1 - y) * log(1 + exp(x))
           = x * y - (1 - y) * log(1 + exp(x))
  Combining both cases, the final expression becomes
      loss = x * y + (y - 1) * x * (x >= 0) - (1 - y) * log(1 + exp(x - 2 * x * (x >= 0)))
  A small NumPy check of this derivation appears after this commit list. This implementation is aligned with the current implementation of the original cross entropy in https://phabricator.intern.facebook.com/diffusion/FBS/browse/master/fbcode/caffe2/caffe2/operators/cross_entropy_op.cc;0bae3b5d0f825897c5e0dd0ff10f489d7271bf25$7-13
* Keep the array to fix the conflict.
* [C2] Compute Adagrad effective LR. The AdagradWithLR op outputs an extra blob which contains the average effective learning rate across all weights in this blob.
* Open-source extractMetaNetDef & runGlobalInitialization, add a new Predictor constructor from a db file, and add run_map_outputs: 1. open-source extractMetaNetDef and runGlobalInitialization, for use in 2. the new Predictor constructor from a db file; 3. add a new run function that returns outputs as a TensorMap.
* Disable Eigen CPU in transpose and reduce.
* Introduce request_only/object_only property of ModelLayer; by default this is False.
* A simple TC Caffe2 benchmark. We can run the tuner, get MappingOptions, and then use them to compare against cuBLAS. Currently broken due to LLVM issues. How to run:
      hg checkout eec1ab31b59c03b8deded1c755a9abaf8c45be01
      add D7401202
      add D7434625
      add D7506031
      add D7540728
      buck run @mode/dev-nosan tc/tc/benchmarks_python:caffe2_benchmark
* Move Caffe2 feature_maps_ops to open source. Feature maps operators are needed in the open source project facebookresearch/BlueWhale.
* Manually fix the conflicts in channel shuffle op.
* Fix the inconsistency between gh and fbcode.
* Skip Adagrad GPU test (because some GPU implementation is missing).
* Fix another test to make sure it won't run on GPU when the implementation is not available yet.
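To make the unjoined lr loss derivation above concrete, here is a small NumPy sketch (an illustration only, not the operator's actual C++ implementation in cross_entropy_op.cc): it checks that the numerically stable piecewise form agrees with the naive form on a few logits where the naive form is safe to evaluate.

import numpy as np

def unjoined_lr_loss_naive(x, y):
    # loss = x*y - (1 - y) * log(1 + exp(x)); overflows for large positive x
    return x * y - (1 - y) * np.log1p(np.exp(x))

def unjoined_lr_loss_stable(x, y):
    # loss = x*y + (y - 1)*x*(x >= 0) - (1 - y)*log(1 + exp(x - 2*x*(x >= 0)))
    pos = (x >= 0).astype(x.dtype)
    return x * y + (y - 1) * x * pos - (1 - y) * np.log1p(np.exp(x - 2 * x * pos))

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
y = np.array([0.0, 1.0, 0.0, 1.0, 0.0])
assert np.allclose(unjoined_lr_loss_naive(x, y), unjoined_lr_loss_stable(x, y))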
194 lines · 6.6 KiB · Python
## @package batch_lr_loss
# Module caffe2.python.layers.batch_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core, schema
from caffe2.python.layers.layers import (
    ModelLayer,
)
from caffe2.python.layers.tags import (
    Tags
)
import numpy as np
class BatchLRLoss(ModelLayer):
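    """Batch logistic-regression (sigmoid cross entropy) loss layer.

    Expects an input record with 'label' and 'logit' scalar fields and emits a
    float32 scalar loss. Supports label smoothing (pos_label_target /
    neg_label_target), an optional Bernoulli JSD term fused with the cross
    entropy (jsd_weight, optionally homotopy-weighted over training), the
    log D trick, the unjoined lr loss described in the commit message above,
    per-example weighting via a 'weight' field, and averaged or summed
    reduction (average_loss).
    """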
    def __init__(
        self,
        model,
        input_record,
        name='batch_lr_loss',
        average_loss=True,
        jsd_weight=0.0,
        pos_label_target=1.0,
        neg_label_target=0.0,
        homotopy_weighting=False,
        log_D_trick=False,
        unjoined_lr_loss=False,
        **kwargs
    ):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        assert not (log_D_trick and unjoined_lr_loss)
        self.log_D_trick = log_D_trick
        self.unjoined_lr_loss = unjoined_lr_loss

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
    def init_weight(self, jsd_weight, homotopy_weighting):
        if homotopy_weighting:
            self.mutex = self.create_param(
                param_name=('%s_mutex' % self.name),
                shape=None,
                initializer=('CreateMutex', ),
                optimizer=self.model.NoOptim,
            )
            self.counter = self.create_param(
                param_name=('%s_counter' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 0,
                        'dtype': core.DataType.INT64
                    }
                ),
                optimizer=self.model.NoOptim,
            )
            self.xent_weight = self.create_param(
                param_name=('%s_xent_weight' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 1.,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )
            self.jsd_weight = self.create_param(
                param_name=('%s_jsd_weight' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 0.,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )
        else:
            self.jsd_weight = self.model.add_global_constant(
                '%s_jsd_weight' % self.name, jsd_weight
            )
            self.xent_weight = self.model.add_global_constant(
                '%s_xent_weight' % self.name, 1. - jsd_weight
            )
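    # When homotopy_weighting is enabled, update_weight below drives the blend
    # between the two loss terms: xent_weight starts at 1.0 and decays toward 0
    # under the 'inv' learning rate policy applied to the iteration counter,
    # while jsd_weight is kept at 1 - xent_weight. Training therefore starts
    # with pure cross entropy and gradually shifts weight onto the JSD term.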
    def update_weight(self, net):
        net.AtomicIter([self.mutex, self.counter], [self.counter])
        # inv policy: xent_weight = base_lr * (1 + gamma * iter) ** (-power)
        # iter = 0:   xent_weight = 1.0
        # iter = 1e6: xent_weight = 0.5 ** 0.1 ~= 0.93
        # iter = 1e9: xent_weight ~= 1e-3 ** 0.1 ~= 0.50
        net.LearningRate([self.counter], [self.xent_weight], base_lr=1.0,
                         policy='inv', gamma=1e-6, power=0.1,)
        net.Sub(
            [self.model.global_constants['ONE'], self.xent_weight],
            [self.jsd_weight]
        )
        return self.xent_weight, self.jsd_weight
    def add_ops(self, net):
        # numerically stable log-softmax with crossentropy
        label = self.input_record.label()
        # mandatory cast to float32
        # self.input_record.label.field_type().base is np.float32 but
        # label type is actually int
        label = net.Cast(
            label,
            net.NextScopedBlob('label_float32'),
            to=core.DataType.FLOAT)
        label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
                               dims=[1])
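        # Optional label smoothing: StumpFunc thresholds the label at 0.5 and
        # maps negatives to neg_label_target and positives to pos_label_target.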
        if self.pos_label_target != 1.0 or self.neg_label_target != 0.0:
            label = net.StumpFunc(
                label,
                net.NextScopedBlob('smoothed_label'),
                threshold=0.5,
                low_value=self.neg_label_target,
                high_value=self.pos_label_target,
            )
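        # log_D_trick and unjoined_lr_loss select alternative formulations of
        # the loss inside SigmoidCrossEntropyWithLogits; they are mutually
        # exclusive (asserted in __init__). unjoined_lr_loss corresponds to the
        # derivation in the commit message above.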
        xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), label],
            net.NextScopedBlob('cross_entropy'),
            log_D_trick=self.log_D_trick,
            unjoined_lr_loss=self.unjoined_lr_loss
        )
        # fuse with JSD
        if self.jsd_fuse:
            jsd = net.BernoulliJSD(
                [self.input_record.prediction(), label],
                net.NextScopedBlob('jsd'),
            )
            if self.homotopy_weighting:
                self.update_weight(net)
            loss = net.WeightedSum(
                [xent, self.xent_weight, jsd, self.jsd_weight],
                net.NextScopedBlob('loss'),
            )
        else:
            loss = xent
        if 'weight' in self.input_record.fields:
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            loss = net.Mul(
                [loss, weight_blob],
                net.NextScopedBlob('weighted_cross_entropy'),
            )

        if self.average_loss:
            net.AveragedLoss(loss, self.output_schema.field_blobs())
        else:
            net.ReduceFrontSum(loss, self.output_schema.field_blobs())