Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 12:21:27 +01:00)
Summary: Original commit changeset: 81319beb97f3
Original Phabricator Diff: D47961182
Test Plan: revert to maintain backward compat with legacy ads_dper3 production package. Read details in: S357822
Reviewed By: atuljangra
Differential Revision: D48131623
@diff-train-skip-merge (D48131623 landed internally)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/106743
Approved by: https://github.com/malfet
41 lines
909 B
Python
import builtins
from typing import Optional, Tuple

import torch
from torch import Tensor

class Parameter(Tensor):
    def __init__(
        self,
        data: Tensor = ...,
        requires_grad: builtins.bool = ...,
    ): ...

def is_lazy(param: Tensor): ...

class UninitializedParameter(Tensor):
    def __init__(
        self,
        data: Tensor = ...,
        requires_grad: builtins.bool = ...,
    ): ...
    def materialize(
        self,
        shape: Tuple[int, ...],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ): ...

class UninitializedBuffer(Tensor):
    def __init__(
        self,
        data: Tensor = ...,
        requires_grad: builtins.bool = ...,
    ): ...
    def materialize(
        self,
        shape: Tuple[int, ...],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ): ...
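
These are type stubs only; the runtime behavior lives in torch/nn/parameter.py. Below is a minimal usage sketch of the lazy-initialization API declared above, assuming a standard PyTorch install where these classes behave as in torch.nn.parameter. The variable name weight, the shape (4, 8), and the dtype are illustrative choices, not taken from this file.

import torch
from torch.nn.parameter import UninitializedParameter, is_lazy

# An uninitialized parameter is a placeholder with no concrete shape or storage yet.
weight = UninitializedParameter()
print(is_lazy(weight))   # True: still a placeholder

# Once the target shape is known (e.g. after the first forward pass of a lazy
# module), materialize() allocates real storage in place with the given
# shape/device/dtype, after which the object behaves like a regular Parameter.
weight.materialize((4, 8), dtype=torch.float32)
print(is_lazy(weight))   # False
print(weight.shape)      # torch.Size([4, 8])

The `= ...` defaults in the stub simply mirror the stub-file convention of eliding default values; the concrete defaults are defined in the runtime module.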