Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 00:21:07 +01:00
Summary: Fixes an issue that arose from https://github.com/pytorch/pytorch/pull/13481 where `.shared_memory()` couldn't be called. Effectively undoes all changes to `nn.Module` from that PR and solves the relevant problem in a different way (the goal was to be able to call `._apply()` on the Python wrapper for a C++ module). soumith

Pull Request resolved: https://github.com/pytorch/pytorch/pull/15305
Differential Revision: D13493937
Pulled By: goldsborough
fbshipit-source-id: 4cb8687f90fc8709a536c5e7eacd0dc8edf6f750
91 lines
2.8 KiB
Python
"""Functionality for Python <-> C++ frontend inter-op."""
|
|
|
|
from torch import nn
|
|
|
|
|
|
class OrderedDictWrapper(object):
|
|
"""
|
|
A wrapper around a C++ OrderedDict that dynamically evaluates the
|
|
OrderedDict getter on a bound C++ module, such that new changes on the C++
|
|
side are picked up. Otherwise accessing e.g. ``cpp_module._parameters`` just
|
|
once would get a frozen copy of the parameters at the time of access.
|
|
``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__`` so
|
|
using properties does not work.
|
|
"""
|
|
|
|
def __init__(self, cpp_module, attr):
|
|
self.cpp_module = cpp_module
|
|
self.attr = attr
|
|
|
|
@property
|
|
def cpp_dict(self):
|
|
return getattr(self.cpp_module, self.attr)
|
|
|
|
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
|
|
# must manually override them.
|
|
|
|
def items(self):
|
|
return self.cpp_dict.items()
|
|
|
|
def keys(self):
|
|
return self.cpp_dict.keys()
|
|
|
|
def values(self):
|
|
return self.cpp_dict.values()
|
|
|
|
def __iter__(self):
|
|
return self.cpp_dict.__iter__()
|
|
|
|
def __len__(self):
|
|
return self.cpp_dict.__len__()
|
|
|
|
def __contains__(self, key):
|
|
return self.cpp_dict.__contains__(key)
|
|
|
|
def __getitem__(self, key):
|
|
return self.cpp_dict.__getitem__(key)
|
|
|
|
|
|
class ModuleWrapper(nn.Module):
|
|
"""
|
|
A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and
|
|
delegates all access.
|
|
"""
|
|
|
|
def __init__(self, cpp_module):
|
|
# Assign before the super class constructor so ``self.training`` can be
|
|
# assigned to in the super class constructor.
|
|
self.cpp_module = cpp_module
|
|
super(ModuleWrapper, self).__init__()
|
|
self._parameters = OrderedDictWrapper(cpp_module, "_parameters")
|
|
self._buffers = OrderedDictWrapper(cpp_module, "_buffers")
|
|
self._modules = OrderedDictWrapper(cpp_module, "_modules")
|
|
for attr in dir(cpp_module):
|
|
# Skip magic methods and the three attributes above.
|
|
if not attr.startswith("_"):
|
|
setattr(self, attr, getattr(self.cpp_module, attr))
|
|
|
|
def _apply(self, fn):
|
|
for param in self.parameters():
|
|
# Tensors stored in modules are graph leaves, and we don't
|
|
# want to create copy nodes, so we have to unpack the data.
|
|
param.data = fn(param.data)
|
|
if param._grad is not None:
|
|
param._grad.data = fn(param._grad.data)
|
|
|
|
for buf in self.buffers():
|
|
buf.data = fn(buf.data)
|
|
|
|
return self
|
|
|
|
@property
|
|
def training(self):
|
|
return self.cpp_module.training
|
|
|
|
@training.setter
|
|
def training(self, mode):
|
|
self.cpp_module.train(mode)
|
|
|
|
def __repr__(self):
|
|
return self.cpp_module.__repr__()
|
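As a rough illustration of how the two wrappers fit together, here is a minimal sketch that wraps a stand-in object and exercises ``_apply`` and the lazy dictionary lookup. The ``FakeCppModule`` class is hypothetical: a pure-Python substitute for a real pybind11-bound C++ frontend module, exposing only the attributes the wrapper touches (``_parameters``, ``_buffers``, ``_modules``, ``training``, ``train``), so the example can run without building a C++ extension. The import path assumes this file lives at ``torch/nn/cpp.py``.

import torch
from torch.nn.cpp import ModuleWrapper


class FakeCppModule(object):
    """Hypothetical pure-Python stand-in for a bound C++ frontend module."""

    def __init__(self):
        self.training = True
        # Plain dicts stand in for the C++ OrderedDicts the binding exposes.
        self._parameters = {"weight": torch.nn.Parameter(torch.randn(3, 3))}
        self._buffers = {"running_mean": torch.zeros(3)}
        self._modules = {}

    def train(self, mode=True):
        self.training = mode


cpp_module = FakeCppModule()
wrapper = ModuleWrapper(cpp_module)

# The wrapper sees the wrapped module's parameter through OrderedDictWrapper.
print([name for name, _ in wrapper.named_parameters()])   # ['weight']

# ``_apply`` (used by ``.double()``, ``.cuda()``, ``.to()``, ...) rewrites
# ``.data`` in place, so the conversion reaches the tensors owned by the
# wrapped module rather than copies held on the Python side.
wrapper.double()
print(cpp_module._parameters["weight"].dtype)              # torch.float64
print(cpp_module._buffers["running_mean"].dtype)           # torch.float64

# Because ``cpp_dict`` is re-read on every access, a parameter registered on
# the wrapped side after construction is still picked up.
cpp_module._parameters["bias"] = torch.nn.Parameter(torch.zeros(3))
print([name for name, _ in wrapper.named_parameters()])   # ['weight', 'bias']

With a real C++ frontend module the same calls go through the bound ``_parameters``/``_buffers``/``_modules`` dictionaries, which is what lets ``_apply``-based methods such as ``.cuda()`` or ``.float()`` on the Python wrapper affect the underlying C++ module.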