[quant][fx] Remove additional_object_mapping from the docs (#75389)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/75389

The feature itself appears to have been removed earlier, so this PR is not marked as bc-breaking; this use case
is now enabled through the backend_config_dict API.
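For context, here is a rough sketch of how the EmbeddingBag mapping from the removed test below might be expressed with backend_config_dict instead. The backend_config_dict format was still evolving on master at the time of this commit, so the key names ("configs", "pattern", "root_module", "qat_module", "reference_quantized_module_for_root") and the default_embedding_qat_qconfig import should be read as assumptions, not a stable API:

    import torch
    import torch.nn as nn
    from torch.ao.quantization import default_embedding_qat_qconfig
    from torch.ao.quantization.quantize_fx import prepare_qat_fx, convert_fx

    # Minimal stand-in for the EmbeddingBagLinear test model referenced below.
    class EmbeddingBagLinear(nn.Module):
        def __init__(self):
            super().__init__()
            self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode="sum")
            self.linear = nn.Linear(12, 1)

        def forward(self, input, offsets):
            return self.linear(self.emb(input, offsets))

    # Apply the QAT embedding qconfig only to EmbeddingBag modules.
    qconfig_dict = {"object_type": [(nn.EmbeddingBag, default_embedding_qat_qconfig)]}

    # Sketch: declare the float -> QAT -> quantized module lineage for EmbeddingBag,
    # replacing the removed "additional_object_mapping" entry. Key names are assumed.
    backend_config_dict = {
        "configs": [
            {
                "pattern": nn.EmbeddingBag,
                "root_module": nn.EmbeddingBag,
                "qat_module": nn.qat.EmbeddingBag,
                "reference_quantized_module_for_root": nn.quantized.EmbeddingBag,
            },
        ],
    }

    model = EmbeddingBagLinear().train()
    prepared = prepare_qat_fx(model, qconfig_dict, backend_config_dict=backend_config_dict)
    # ... QAT training loop would go here ...
    quantized = convert_fx(prepared, backend_config_dict=backend_config_dict)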

Test Plan:
python test/test_quantization.py TestQuantizeFx
python test/test_quantization.py TestQuantizeFxOps

Imported from OSS

Reviewed By: vkuzo

Differential Revision: D35451960

fbshipit-source-id: 21a8f19c1968af44bf4fa603f16ee8c6f5080e5a
(cherry picked from commit 2862f17b57f846b55736bc6b5d10df4256567adf)
Jerry Zhang 2022-04-11 03:32:28 -07:00 committed by PyTorch MergeBot
parent bcf6974c20
commit 72d3d160fb
2 changed files with 0 additions and 22 deletions


@@ -6889,15 +6889,7 @@ class TestQuantizeFxModels(QuantizationTestCase):
         model = EmbeddingBagLinear().train()
         prepared_fx_model = prepare_qat_fx(model, qconfig_dict)
         test_only_train_fn(prepared_fx_model, train_indices)
-        convert_custom_config_dict = {
-            "additional_object_mapping": {
-                "static": {
-                    torch.nn.qat.EmbeddingBag: nn.quantized.EmbeddingBag,
-                }
-            }
-        }
         quant_model = convert_fx(prepared_fx_model,
-                                 convert_custom_config_dict=convert_custom_config_dict,
                                  qconfig_dict=qconfig_dict)
         def checkQuantized(model):
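The explicit mapping removed above became unnecessary because the QAT-to-quantized EmbeddingBag entry ships in the default module mappings; a quick check along these lines should confirm it, though whether the entry is present depends on the PyTorch version:

    import torch
    from torch.ao.quantization.quantization_mappings import (
        get_default_static_quant_module_mappings,
    )

    # The default static mappings are expected (on versions from this era onward)
    # to already contain the QAT EmbeddingBag -> quantized EmbeddingBag entry.
    mappings = get_default_static_quant_module_mappings()
    print(mappings.get(torch.nn.qat.EmbeddingBag))
    # expected: torch.nn.quantized EmbeddingBag class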


@@ -594,20 +594,6 @@ def convert_fx(
     * `convert_custom_config_dict`: dictionary for custom configurations for convert function::

         convert_custom_config_dict = {
-            # additional object (module/operator) mappings that will overwrite the default
-            # module mapping
-            "additional_object_mapping": {
-                "static": {
-                    FloatModule: QuantizedModule,
-                    float_op: quantized_op
-                },
-                "dynamic": {
-                    FloatModule: DynamicallyQuantizedModule,
-                    float_op: dynamically_quantized_op
-                },
-            },
             # user will manually define the corresponding quantized
             # module class which has a from_observed class method that converts
             # observed custom module to quantized custom module
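For completeness, here is a minimal sketch of the custom-module mapping that this docstring goes on to describe; the class names are hypothetical, and the "observed_to_quantized_custom_module_class" key is taken from the convert_fx docs of this era:

    import torch

    # Hypothetical observed custom module as produced by prepare_fx/prepare_qat_fx.
    class ObservedCustomModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.randn(4, 4))

    # Hypothetical quantized counterpart; per the docstring, the contract is a
    # from_observed classmethod that builds the quantized module from the observed one.
    class QuantizedCustomModule(torch.nn.Module):
        def __init__(self, weight):
            super().__init__()
            self.weight = weight

        @classmethod
        def from_observed(cls, observed_module):
            return cls(observed_module.weight)

    convert_custom_config_dict = {
        "observed_to_quantized_custom_module_class": {
            "static": {
                ObservedCustomModule: QuantizedCustomModule,
            }
        }
    }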