pytorch/torch/quantization/__init__.py
Vasiliy Kuznetsov 6101cbcedb torch.ao migration: fake_quantize.py, phase 1 (#64814)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64814

1. move the file
```
hg mv caffe2/torch/quantization/fake_quantize.py caffe2/torch/ao/quantization/
```

2. create a new file in the old location and copy the imports (a sketch of such a shim is below)
3. fix all callsites inside `torch`
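
The shim left at the old location just re-imports from the new module. A minimal sketch of what such a shim could look like, assuming a star re-export (the exact names forwarded are not shown in this diff):
```python
# torch/quantization/fake_quantize.py -- backwards-compatibility shim (sketch)
# flake8: noqa: F401,F403
# Keep `torch.quantization.fake_quantize` importable by forwarding
# everything to the new `torch.ao.quantization` location.
from torch.ao.quantization.fake_quantize import *
```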

Test Plan:
```
buck test mode/dev //caffe2/test:quantization
```

Reviewed By: z-a-f

Differential Revision: D30866792

fbshipit-source-id: 7a221cb46c0ab01f1c5de9be061f09ecc83ce23e
2021-09-13 15:22:28 -07:00

from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403


def default_eval_fn(model, calib_data):
    r"""
    Default evaluation function: takes a torch.utils.data.Dataset or a list of
    ``(input, target)`` pairs and runs the model on each input; the target is
    ignored.
    """
    for data, target in calib_data:
        # The forward pass alone drives calibration (observers record stats).
        model(data)
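
As a usage sketch (the model construction and calibration data are illustrative, not part of this file), `default_eval_fn` is the run function that the one-shot `quantize` API calls during calibration:
```python
import torch
import torch.quantization

# Illustrative setup: QuantWrapper adds QuantStub/DeQuantStub around the
# module so it handles the float <-> quantized boundary.
model = torch.quantization.QuantWrapper(torch.nn.Linear(4, 4)).eval()
model.qconfig = torch.quantization.default_qconfig

# default_eval_fn expects an iterable of (input, target) pairs.
calib_data = [(torch.randn(1, 4), 0) for _ in range(8)]

# quantize() prepares the model, runs default_eval_fn(model, calib_data)
# to calibrate the observers, then converts to quantized modules.
qmodel = torch.quantization.quantize(
    model, torch.quantization.default_eval_fn, [calib_data]
)
```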

__all__ = [
    'QuantWrapper', 'QuantStub', 'DeQuantStub',
    # Top level API for eager mode quantization
    'quantize', 'quantize_dynamic', 'quantize_qat',
    'prepare', 'convert', 'prepare_qat',
    # Top level API for graph mode quantization on TorchScript
    'quantize_jit', 'quantize_dynamic_jit',
    # Top level API for graph mode quantization on GraphModule(torch.fx)
    # 'fuse_fx', 'quantize_fx',  # TODO: add quantize_dynamic_fx
    # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
    'QuantType', 'quant_type_to_str',  # quantization type
    # custom module APIs
    'get_default_static_quant_module_mappings', 'get_static_quant_module_class',
    'get_default_dynamic_quant_module_mappings',
    'get_default_qat_module_mappings',
    'get_default_qconfig_propagation_list',
    'get_default_compare_output_module_list',
    'get_quantized_operator',
    'get_fuser_method',
    # Sub functions for `prepare` and `swap_module`
    'propagate_qconfig_', 'add_quant_dequant', 'add_observer_', 'swap_module',
    'default_eval_fn', 'get_observer_dict',
    'register_activation_post_process_hook',
    # Observers
    'ObserverBase', 'WeightObserver', 'HistogramObserver',
    'observer', 'default_observer',
    'default_weight_observer', 'default_placeholder_observer',
    'default_per_channel_weight_observer',
    # FakeQuantize (for qat)
    'default_fake_quant', 'default_weight_fake_quant',
    'default_symmetric_fixed_qparams_fake_quant',
    'default_affine_fixed_qparams_fake_quant',
    'default_per_channel_weight_fake_quant',
    'default_histogram_fake_quant',
    # QConfig
    'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
    'float_qparams_weight_only_qconfig',
    # QAT utilities
    'default_qat_qconfig', 'prepare_qat', 'quantize_qat',
    # module transformations
    'fuse_modules',
]
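
For reference, a minimal sketch of the manual eager-mode path using the `prepare`/`convert` names exported above (the module and the calibration input are illustrative):
```python
import torch
import torch.quantization

# Illustrative float model; QuantStub/DeQuantStub mark where tensors
# cross the float <-> quantized boundary.
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()
        self.fc = torch.nn.Linear(4, 4)
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))

m = M().eval()
m.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(m, inplace=True)   # insert observers
m(torch.randn(1, 4))                          # calibrate on sample data
torch.quantization.convert(m, inplace=True)   # swap in quantized modules
```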