Summary: Adds support for generating and training pfp16 models. Added an SGD optimizer for multi-precision trainers and a new callback to data_parallel_model to help multi-precision models keep their different copies of parameters in sync during training.

Closes https://github.com/caffe2/caffe2/pull/697
Differential Revision: D5159712
Pulled By: salexspb
fbshipit-source-id: 60a889494d2e2f4df1d720331e19f638c5eb95cc
89 lines | 3.1 KiB | Python
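For context, "keep their different copies of parameters in sync" in the summary refers to the usual multi-precision pattern: compute forward/backward in fp16, apply the weight update to an fp32 master copy, then re-cast the master back to fp16. A minimal sketch of one such SGD step follows; all blob names ("conv_w", "ONE", "NEG_LR", ...) are hypothetical illustrations, not taken from the PR.

from caffe2.python import core

net = core.Net("sgd_step")
# Widen the fp16 gradient so the update happens in fp32.
grad_fp32 = net.HalfToFloat("conv_w_grad", "conv_w_grad_fp32")
# param_fp32 += NEG_LR * grad_fp32 ("ONE" and "NEG_LR" are constant blobs
# assumed to have been created in the init net).
net.WeightedSum(["conv_w_fp32", "ONE", grad_fp32, "NEG_LR"], "conv_w_fp32")
# Re-cast so the fp16 compute copy matches the updated fp32 master.
net.FloatToHalf("conv_w_fp32", "conv_w")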
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python.modeling.parameter_info import ParameterInfo
from caffe2.python.core import DataType

class Initializer(object):
    '''
    This class abstracts out parameter creation. One can come up with a new
    Initializer in order to implement more complex parameter initialization
    logic.
    '''

    def __init__(self, operator_name=None, **kwargs):
        self.operator_name = operator_name
        self.operator_kwargs = kwargs

    def update(self, operator_name, kwargs):
        if self.operator_name is not None:
            raise Exception("Operator name overwrites are not allowed")
        self.operator_name = operator_name
        self.operator_kwargs = kwargs

    def create_param(self, param_name, init_net, shape):
        # Run the configured fill operator (e.g. XavierFill) on the init net
        # to create the parameter blob.
        param = init_net.__getattr__(self.operator_name)(
            [], param_name, shape=shape, **self.operator_kwargs)
        return ParameterInfo(
            param_id=None,
            param=param,
            shape=shape,
        )
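
A minimal usage sketch, assuming the caffe2 Python package is importable; the operator name "XavierFill" and the blob name "fc_w" are illustrative choices, not mandated by this file:

from caffe2.python import core

init_net = core.Net("init")
# Initializer defers to whatever fill operator is named at construction.
init = Initializer("XavierFill")
info = init.create_param("fc_w", init_net, shape=[256, 128])
# info wraps the created blob together with its shape.
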
class pFP16Initializer(Initializer):
    def update(self, operator_name, kwargs):
        if self.operator_name is not None:
            raise Exception("Operator name overwrites are not allowed")
        self.operator_name = operator_name
        self.operator_kwargs = kwargs

    def create_param(self, param_name, init_net, shape):
        # create master fp32 copy
        param_fp32 = init_net.__getattr__(self.operator_name)(
            [], param_name + "_fp32", shape=shape,
            **self.operator_kwargs)
        # cast to fp16 copy
        param = init_net.FloatToHalf(
            param_fp32, param_name)

        return ParameterInfo(
            param_id=None,
            param=param,
            shape=shape,
            # blob_copy records the fp32 master blob so multi-precision
            # trainers can keep both copies in sync during training.
            blob_copy={DataType.FLOAT: param_fp32}
        )
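
A hedged sketch of the fp16 path; "GaussianFill" and the blob names are illustrative, not part of this file:

from caffe2.python import core

init_net = core.Net("init")
init = pFP16Initializer("GaussianFill", mean=0.0, std=0.01)
info = init.create_param("conv_w", init_net, shape=[64, 3, 3, 3])
# The fp16 blob "conv_w" is what the net computes with; the fp32 master
# copy "conv_w_fp32" is recorded in info.blob_copy[DataType.FLOAT].
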
def update_initializer(initializer_class,
                       operator_name_and_kwargs,
                       default_operator_name_and_kwargs):
    '''
    A helper function to convert from operator_name_and_kwargs to a new
    object of type initializer_class. This function serves two purposes:

    1. Support for custom initialization operators being passed in
    2. Allow the user to specify a custom Initializer without overwriting
       the default operators used for initialization

    If initializer_class is None, creates a default initializer using
    the Initializer class and the operator_name_and_kwargs provided.

    If operator_name_and_kwargs is None, uses
    default_operator_name_and_kwargs.

    Returns an instantiated Initializer object.
    '''
    def get_initializer_args():
        return (
            operator_name_and_kwargs or
            default_operator_name_and_kwargs
        )

    if initializer_class is not None:
        init = initializer_class(get_initializer_args()[0],
                                 **get_initializer_args()[1])
    else:
        init = Initializer(
            get_initializer_args()[0],
            **get_initializer_args()[1]
        )
    return init
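
A short sketch of how a caller might combine the two arguments; the operator tuples here are hypothetical examples, not defaults defined by this file:

# Defaults used when the caller supplies nothing.
default_init = ("ConstantFill", {"value": 0.0})
# Caller-supplied override.
custom_init = ("GaussianFill", {"mean": 0.0, "std": 0.02})

# No custom class, no override: a plain Initializer over the defaults.
init_a = update_initializer(None, None, default_init)

# Custom class plus override: a pFP16Initializer wrapping GaussianFill.
init_b = update_initializer(pFP16Initializer, custom_init, default_init)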