mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: This PR implements UninitializedBuffer and LazyBatchnormXd based on https://github.com/pytorch/pytorch/issues/44538. (cc. emcastillo and albanD) Pull Request resolved: https://github.com/pytorch/pytorch/pull/51548 Reviewed By: zhangguanheng66 Differential Revision: D26276903 Pulled By: albanD fbshipit-source-id: 0ac706974178363f8af075e59b41d5989418922f
24 lines
753 B
Python
24 lines
753 B
Python
import torch
from .. import Tensor
from typing import Tuple, Optional
import builtins
class Parameter(Tensor):
    """Type stub for ``torch.nn.Parameter``, a ``Tensor`` subclass.

    The runtime implementation lives in ``parameter.py``; this stub only
    declares the constructor signature for static type checkers.
    """

    # ``...`` defaults mirror the runtime defaults without restating them here.
    def __init__(self, data: Tensor = ..., requires_grad: builtins.bool = ...) -> None: ...

    # Placeholder: additional attributes/methods exist at runtime but are
    # intentionally not declared in this stub.
    ...
# Stub signature only; implementation is in parameter.py. Presumably reports
# whether *param* is an uninitialized (lazy) parameter/buffer — the return
# type is not declared here, so it is left unannotated (TODO: confirm bool).
def is_lazy(param: Tensor): ...
class UninitializedParameter(Tensor):
    """Type stub for ``torch.nn.UninitializedParameter``.

    A parameter placeholder whose backing storage is created later via
    :meth:`materialize` (implementation in ``parameter.py``).
    """

    def __init__(self, data: Tensor = ..., requires_grad: builtins.bool = ...) -> None: ...

    # Creates the parameter's storage with the given shape and, optionally,
    # device/dtype; mutates in place and returns nothing.
    def materialize(
        self,
        shape: Tuple[int, ...],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> None: ...

    # Placeholder: further runtime members are intentionally undeclared.
    ...
class UninitializedBuffer(Tensor):
    """Type stub for ``torch.nn.UninitializedBuffer``.

    A buffer placeholder whose backing storage is created later via
    :meth:`materialize` (implementation in ``parameter.py``); signatures
    deliberately mirror :class:`UninitializedParameter`.
    """

    def __init__(self, data: Tensor = ..., requires_grad: builtins.bool = ...) -> None: ...

    # Creates the buffer's storage with the given shape and, optionally,
    # device/dtype; mutates in place and returns nothing.
    def materialize(
        self,
        shape: Tuple[int, ...],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> None: ...

    # Placeholder: further runtime members are intentionally undeclared.
    ...