Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 00:21:07 +01:00.
Part of #123062 - #123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/128865 Approved by: https://github.com/ezyang
40 lines
771 B
Python
40 lines
771 B
Python
from torch.nn.quantized import dynamic, functional, modules # noqa: F403
|
|
from torch.nn.quantized.modules import * # noqa: F403
|
|
from torch.nn.quantized.modules import MaxPool2d
|
|
|
|
|
|
__all__ = [
|
|
"BatchNorm2d",
|
|
"BatchNorm3d",
|
|
"Conv1d",
|
|
"Conv2d",
|
|
"Conv3d",
|
|
"ConvTranspose1d",
|
|
"ConvTranspose2d",
|
|
"ConvTranspose3d",
|
|
"DeQuantize",
|
|
"Dropout",
|
|
"ELU",
|
|
"Embedding",
|
|
"EmbeddingBag",
|
|
"GroupNorm",
|
|
"Hardswish",
|
|
"InstanceNorm1d",
|
|
"InstanceNorm2d",
|
|
"InstanceNorm3d",
|
|
"LayerNorm",
|
|
"LeakyReLU",
|
|
"Linear",
|
|
"LSTM",
|
|
"MultiheadAttention",
|
|
"PReLU",
|
|
"Quantize",
|
|
"ReLU6",
|
|
"Sigmoid",
|
|
"Softmax",
|
|
# Wrapper modules
|
|
"FloatFunctional",
|
|
"FXFloatFunctional",
|
|
"QFunctional",
|
|
]
|