diff --git a/torch/distributed/optim/optimizer.py b/torch/distributed/optim/optimizer.py
index b10eacefea0..a68e9fc9c11 100644
--- a/torch/distributed/optim/optimizer.py
+++ b/torch/distributed/optim/optimizer.py
@@ -1,4 +1,5 @@
 from typing import List, Optional
+import logging
 
 import torch.distributed.rpc as rpc
 import torch.optim as optim
@@ -18,6 +19,7 @@ import torch.distributed.autograd as dist_autograd
 from collections import defaultdict
 from threading import Lock
 
+logger = logging.getLogger(__name__)
 
 # XXX: we define a _ScriptModuleOptimizer here to explicitly
 # compile the FunctionalOptimizer class into TorchScript
@@ -207,6 +209,13 @@ class DistributedOptimizer:
         if self.is_functional_optim:
             optimizer_new_func = _new_script_local_optimizer
         else:
+            logger.warning(
+                f"Creating the optimizer {optimizer_class} without TorchScript support; "
+                "this may result in slow computation time in a multithreaded environment "
+                "(i.e. Distributed Model Parallel training on CPU) due to Python's "
+                "Global Interpreter Lock (GIL). Please file an issue if you need this "
+                "optimizer in TorchScript."
+            )
             optimizer_new_func = _new_local_optimizer
 
         remote_optim_futs = []
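
A minimal sketch (not part of the patch) of how the new warning surfaces. The single-process RPC setup, the worker name, the custom optimizer subclass, and the parameter shape are illustrative assumptions; the grounded point is only that an optimizer class without a TorchScript/functional counterpart takes the `_new_local_optimizer` branch above and logs the message.

# Illustrative sketch only -- the RPC bootstrap, worker name, and custom
# optimizer subclass are assumptions, not part of this patch.
import logging
import os

import torch
import torch.distributed.rpc as rpc
from torch.distributed.optim import DistributedOptimizer

# Make warnings from torch.distributed.optim.optimizer visible on stderr.
logging.basicConfig(level=logging.WARNING)


class MyOptimizer(torch.optim.SGD):
    # Assumption: a custom optimizer class has no TorchScript/functional
    # counterpart, so DistributedOptimizer falls back to the non-TorchScript
    # path and emits the new GIL warning when it is constructed.
    pass


os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")
rpc.init_rpc("worker0", rank=0, world_size=1)

# DistributedOptimizer expects RRefs to the parameters it should update.
params = [torch.nn.Parameter(torch.randn(2, 2))]
param_rrefs = [rpc.RRef(p) for p in params]

# Constructing the distributed optimizer triggers the warning added above.
opt = DistributedOptimizer(MyOptimizer, param_rrefs, lr=0.01)

rpc.shutdown()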