Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
Summary: Generally wildcard imports are bad for the reasons described here: https://www.flake8rules.com/rules/F403.html

This PR replaces wildcard imports with an explicit list of imported items where possible, and adds a `# noqa: F403` comment in the other cases (mostly re-exports in `__init__.py` files). This is a prerequisite for https://github.com/pytorch/pytorch/issues/55816, because currently [`tools/codegen/dest/register_dispatch_key.py` simply fails if you sort its imports](https://github.com/pytorch/pytorch/actions/runs/742505908).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/55838

Test Plan: CI. You can also run `flake8` locally.

Reviewed By: jbschlosser

Differential Revision: D27724232

Pulled By: samestep

fbshipit-source-id: 269fb09cb4168f8a51fd65bfaacc6cda7fb87c34
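To illustrate the change the summary describes, here is a minimal sketch using the standard-library `os.path` module as a stand-in (none of these lines are taken from the PR): a wildcard import is either replaced by an explicit list of the names actually used or, where re-exporting everything is intentional, kept and suppressed with `# noqa: F403`, which is the approach taken in the `__init__.py` listed below.

# Before: flake8 reports F403 because names pulled in by `*` cannot be checked.
from os.path import *

# Option 1: import the needed names explicitly.
from os.path import join, dirname

# Option 2: keep the wildcard where re-exporting everything is the point
# (e.g. in an __init__.py) and suppress the warning.
from os.path import *  # noqa: F403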
61 lines · 2.4 KiB · Python
from .quantize import *  # noqa: F403
from .observer import *  # noqa: F403
from .qconfig import *  # noqa: F403
from .fake_quantize import *  # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import *  # noqa: F403
from .quant_type import *  # noqa: F403
from .quantize_jit import *  # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import *  # noqa: F403
from .fuser_method_mappings import *  # noqa: F403

def default_eval_fn(model, calib_data):
    r"""
    Default evaluation function takes a torch.utils.data.Dataset or a list of
    input Tensors and runs the model on the dataset
    """
    for data, target in calib_data:
        model(data)

__all__ = [
    'QuantWrapper', 'QuantStub', 'DeQuantStub',
    # Top level API for eager mode quantization
    'quantize', 'quantize_dynamic', 'quantize_qat',
    'prepare', 'convert', 'prepare_qat',
    # Top level API for graph mode quantization on TorchScript
    'quantize_jit', 'quantize_dynamic_jit',
    # Top level API for graph mode quantization on GraphModule(torch.fx)
    # 'fuse_fx', 'quantize_fx',  # TODO: add quantize_dynamic_fx
    # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
    'QuantType', 'quant_type_to_str',  # quantization type
    # custom module APIs
    'get_default_static_quant_module_mappings', 'get_static_quant_module_class',
    'get_default_dynamic_quant_module_mappings',
    'get_default_qat_module_mappings',
    'get_default_qconfig_propagation_list',
    'get_default_compare_output_module_list',
    'get_quantized_operator',
    'get_fuser_method',
    # Sub functions for `prepare` and `swap_module`
    'propagate_qconfig_', 'add_quant_dequant', 'add_observer_', 'swap_module',
    'default_eval_fn', 'get_observer_dict',
    'register_activation_post_process_hook',
    # Observers
    'ObserverBase', 'WeightObserver', 'observer', 'default_observer',
    'default_weight_observer', 'default_placeholder_observer',
    # FakeQuantize (for qat)
    'default_fake_quant', 'default_weight_fake_quant',
    'default_symmetric_fixed_qparams_fake_quant',
    'default_affine_fixed_qparams_fake_quant',
    'default_per_channel_weight_fake_quant',
    'default_histogram_fake_quant',
    # QConfig
    'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
    'float_qparams_weight_only_qconfig',
    # QAT utilities
    'default_qat_qconfig', 'prepare_qat', 'quantize_qat',
    # module transformations
    'fuse_modules',
]
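For context, a minimal usage sketch of `default_eval_fn` as defined in this file (the model and calibration data below are stand-ins invented for illustration; in eager-mode post-training quantization the function is typically supplied as the calibration callback):

import torch
import torch.nn as nn
from torch.quantization import default_eval_fn

# Stand-in model and calibration set: calib_data is an iterable of
# (input, target) pairs; default_eval_fn runs only the inputs through the model.
model = nn.Linear(4, 2)
calib_data = [(torch.randn(8, 4), torch.zeros(8)) for _ in range(10)]

default_eval_fn(model, calib_data)  # forwards each calibration batch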