Test Plan: revert-hammer
Differential Revision: D30117838 (3f09485d7e)
Original commit changeset: e6365a910a3d
fbshipit-source-id: f276b2b2bdf5f7bd27df473fca0eebaee9f7aef2
"""
|
|
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
|
|
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
|
|
optimizer locally on the workers where the parameters live. The distributed
|
|
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
|
|
apply the gradients on each worker.
|
|
"""
|
|
from .optimizer import DistributedOptimizer
|
|
from .post_localSGD_optimizer import PostLocalSGDOptimizer
|
|
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer
|
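
As a rough illustration of the workflow the module docstring describes, the sketch below passes RRefs to remote parameters into DistributedOptimizer inside a distributed autograd context. It assumes setup that is not shown here: rpc.init_rpc has already been called on this process, and a peer named "worker1" (an illustrative name) is part of the RPC group; the tensors and learning rate are placeholders.

import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import optim
from torch.distributed.optim import DistributedOptimizer

# Assumes rpc.init_rpc(...) has already run and "worker1" is a live peer.
with dist_autograd.context() as context_id:
    # Forward pass: create tensors owned by the remote worker, held via RRefs.
    rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
    rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
    loss = rref1.to_here() + rref2.to_here()

    # Backward pass through the distributed autograd engine.
    dist_autograd.backward(context_id, [loss.sum()])

    # DistributedOptimizer wraps a local optimizer class (SGD here) and
    # runs it on each worker that owns one of the given parameter RRefs.
    dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
    dist_optim.step(context_id)

Any local optimizer listed under :ref:`optimizer-algorithms` could be substituted for optim.SGD in the constructor; the extra keyword arguments (lr=0.05 above) are forwarded to that local optimizer on each worker.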