pytorch/caffe2/python/modeling/parameter_info.py
Hassan Eslami 3578909671 Remove unused code base for distributed training (#10282)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/10282

This diff removes the unused/deprecated features from the code base.

Reviewed By: manojkris

Differential Revision: D9169859

fbshipit-source-id: d6447b7916a7c687b44b20da868112e6720ba245
2018-08-16 20:10:17 -07:00

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core

import numpy as np


class ParameterTags(object):
    BIAS = 'BIAS'
    WEIGHT = 'WEIGHT'
    COMPUTED_PARAM = 'COMPUTED_PARAM'


class ParameterInfo(object):

    def __init__(
            self, param_id, param, key=None, shape=None, length=None,
            grad=None, blob_copy=None):
        assert isinstance(param, core.BlobReference)
        self.param_id = param_id
        self.name = str(param)
        self.blob = param
        self.key = key
        self.shape = shape
        self.size = None if shape is None else np.prod(shape)
        self.length = max(1, length if length is not None else 1)
        self.grad = grad
        self._cloned_init_net = None
        # Optionally store equivalent copies of the blob
        # in different precisions (i.e. half and float copies),
        # stored as a dict of TensorProto.DataType -> BlobReference
        self.blob_copy = blob_copy
        # Each param_info can have its own optimizer. It can be set within
        # OptimizerContext (caffe2/python/optimizer.py).
        self._optimizer = None

    @property
    def parameter(self):
        return self.blob

    @property
    def optimizer(self):
        return self._optimizer

    @optimizer.setter
    def optimizer(self, value):
        assert self._optimizer is None, "optimizer has already been set"
        self._optimizer = value

    def __str__(self):
        return self.name
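
A minimal usage sketch, for illustration only (not part of the file): it assumes
Caffe2 is importable, and the blob name 'fc_w', the shape, and the optimizer
value are hypothetical placeholders.

    from caffe2.python import core
    from caffe2.python.modeling.parameter_info import ParameterInfo

    # Hypothetical blob standing in for a fully-connected layer's weight.
    weight = core.BlobReference('fc_w')
    info = ParameterInfo(param_id=0, param=weight, shape=(256, 128))

    print(info)            # 'fc_w' -- __str__ returns the blob name
    print(info.size)       # 32768 -- np.prod(shape)
    print(info.parameter)  # the underlying BlobReference

    # The optimizer can be assigned once; a second assignment trips the assert.
    info.optimizer = 'sgd'  # placeholder value; normally set via OptimizerContext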