mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Following https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md we implemented the backend configuration for fbgemm/qnnpack backend. Currently it was under the fx folder, but we'd like to use this for all different workflows, including eager, fx graph, and define-by-run quantization, so this PR moves it to the torch.ao.quantization namespace so that it can be shared by different workflows. Also moves some utility functions specific to fx to fx/backend_config_utils.py; some files are kept in the fx folder (quantize_handler.py and fuse_handler.py). Test Plan: python test/test_quantization.py TestQuantizeFx python test/test_quantization.py TestQuantizeFxOps python test/test_quantization.py TestQuantizeFxModels python test/test_quantization.py TestAOMigrationQuantization python test/test_quantization.py TestAOMigrationQuantizationFx Reviewers: Subscribers: Tasks: Tags: Pull Request resolved: https://github.com/pytorch/pytorch/pull/75823 Approved by: https://github.com/vkuzo
40 lines
2.0 KiB
Python
40 lines
2.0 KiB
Python
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""

# Re-export the quantize-handler classes from their new home under
# `torch.ao.quantization` so that legacy imports of
# `torch.quantization.fx.quantization_patterns` keep working.
from torch.ao.quantization.fx.quantization_patterns import (
    QuantizeHandler,
    BinaryOpQuantizeHandler,
    CatQuantizeHandler,
    ConvReluQuantizeHandler,
    LinearReLUQuantizeHandler,
    BatchNormQuantizeHandler,
    EmbeddingQuantizeHandler,
    RNNDynamicQuantizeHandler,
    DefaultNodeQuantizeHandler,
    FixedQParamsOpQuantizeHandler,
    CopyNodeQuantizeHandler,
    CustomModuleQuantizeHandler,
    GeneralTensorShapeOpQuantizeHandler,
    StandaloneModuleQuantizeHandler,
)

# Rewrite `__module__` on each re-exported class so that repr(), pickling,
# and documentation tooling report the legacy (pre-migration) module path
# rather than the new `torch.ao.quantization` location. This keeps
# serialized artifacts produced against the old namespace loadable.
QuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
BinaryOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CatQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
ConvReluQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
LinearReLUQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
BatchNormQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
EmbeddingQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
RNNDynamicQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
DefaultNodeQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
FixedQParamsOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CopyNodeQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CustomModuleQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
GeneralTensorShapeOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
StandaloneModuleQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"