Summary: Once QAT has completed, or a pre-tuned weight observer has been supplied via a tunable PTQ algorithm, the weight observer should not be re-run and its result overwritten during conversion; for static QAT this must never happen, and dynamic QAT by design also does not need the weight observer to be re-run. This change fixes that.
Test Plan: Signals
Differential Revision: D57747749
Pull Request resolved: https://github.com/pytorch/pytorch/pull/127309
Approved by: https://github.com/jerryzh168
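To make the flow concrete, here is a minimal eager-mode QAT sketch (the toy module M, its layer sizes, and the qconfig choice are illustrative assumptions, not taken from the PR): after prepare_qat and fine-tuning, convert() should reuse the weight qparams already tuned during QAT instead of re-running the weight observer.

import torch
import torch.ao.quantization as tq

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()
        self.linear = torch.nn.Linear(4, 4)   # illustrative layer, not from the PR
        self.dequant = tq.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.linear(self.quant(x)))

model = M().train()
model.qconfig = tq.get_default_qat_qconfig("fbgemm")
tq.prepare_qat(model, inplace=True)
# ... fine-tune here; fake-quant modules tune the weight observers ...
model.eval()
quantized = tq.convert(model)  # per the fix above, the tuned weight observers are not re-run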
28 lines
777 B
Python
import torch

__all__ = ['Dropout']


class Dropout(torch.nn.Dropout):
    r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.

    And this is a placeholder to enable models where fp32 tensors
    had dropout to work with quantized tensors in train and eval mode.

    Args:
        p: probability of an element to be zeroed
        inplace: can optionally do the operation in-place. Default: ``False``
    """

    def forward(self, input):
        return input

    def _get_name(self):
        return 'QuantizedDropout'

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return cls(mod.p, mod.inplace)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(mod.p, mod.inplace)
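As a usage sketch (not part of the file above; the import path torch.ao.nn.quantized and the example values are assumptions), the quantized Dropout simply passes quantized tensors through unchanged:

import torch
import torch.ao.nn.quantized as nnq  # assumed import path for the module above

# Build the quantized module from a float nn.Dropout.
float_dropout = torch.nn.Dropout(p=0.5)
qdropout = nnq.Dropout.from_float(float_dropout)

# Quantize an example tensor and run it through; forward() returns the input unchanged.
x = torch.randn(2, 3)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
out = qdropout(qx)
assert out is qx  # identity pass-through in both train and eval mode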