mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
Summary: - fix https://github.com/pytorch/pytorch/issues/315 - add `__deepcopy__` back to Parameter class Pull Request resolved: https://github.com/pytorch/pytorch/pull/12886 Differential Revision: D12838771 Pulled By: weiyangfb fbshipit-source-id: b2ce12244e36f981d89f6c7cdead63237dd820ea
45 lines
1.6 KiB
Python
import torch
|
|
from collections import OrderedDict
|
|
|
|
|
|
class Parameter(torch.Tensor):
    r"""A kind of Tensor that is to be considered a module parameter.

    Parameters are :class:`~torch.Tensor` subclasses with one special
    property: when assigned as an attribute of a :class:`Module`, they are
    automatically registered in that module's parameter list and appear in
    e.g. the :meth:`~Module.parameters` iterator. Assigning a plain Tensor
    has no such effect, which lets a module cache transient state (say, the
    last hidden state of an RNN) without it being registered as a parameter.

    Arguments:
        data (Tensor): parameter tensor.
        requires_grad (bool, optional): if the parameter requires gradient. See
            :ref:`excluding-subgraphs` for more details. Default: `True`
    """

    def __new__(cls, data=None, requires_grad=True):
        # An empty tensor stands in when no data is supplied.
        if data is None:
            data = torch.Tensor()
        return torch.Tensor._make_subclass(cls, data, requires_grad)

    def __deepcopy__(self, memo):
        # Honor the memo dict so shared references stay shared in the copy:
        # only build a fresh Parameter the first time this object is seen.
        key = id(self)
        if key not in memo:
            memo[key] = type(self)(self.data.clone(), self.requires_grad)
        return memo[key]

    def __repr__(self):
        return 'Parameter containing:\n' + super(Parameter, self).__repr__()

    def __reduce_ex__(self, proto):
        # See Note [Don't serialize hooks]
        state = (self.data, self.requires_grad, OrderedDict())
        return (torch._utils._rebuild_parameter, state)