Fix DDP documentation (#46861)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/46861

Noticed that in the DDP documentation
(https://pytorch.org/docs/master/generated/torch.nn.parallel.DistributedDataParallel.html?highlight=distributeddataparallel)
some examples used `torch.nn.DistributedDataParallel`; fix these to read
`torch.nn.parallel.DistributedDataParallel`.

ghstack-source-id: 115453703

Test Plan: ci

Reviewed By: pritamdamania87, SciPioneer

Differential Revision: D24534486

fbshipit-source-id: 64b92dc8a55136c23313f7926251fe825a2cb7d5
commit ecdbea77bc
parent 262bd6437a
@@ -329,7 +329,7 @@ class DistributedDataParallel(Module):
         Example::

             >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
-            >>> net = torch.nn.DistributedDataParallel(model, pg)
+            >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
     """
     def __init__(self, module, device_ids=None,
                  output_device=None, dim=0, broadcast_buffers=True,
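For readers who want to try the corrected constructor path outside a multi-GPU launcher, here is a minimal sketch of the same usage. The gloo backend, single-process world size, loopback init address, and the toy nn.Linear model are assumptions made so the snippet can run standalone on CPU; they are not part of the patch, whose docstring example uses backend='nccl' with world_size=4.

    import torch
    import torch.distributed as dist
    import torch.nn as nn

    # Single-process gloo group so the sketch runs on CPU without a launcher.
    dist.init_process_group(backend='gloo',
                            init_method='tcp://127.0.0.1:29500',
                            world_size=1, rank=0)

    model = nn.Linear(10, 10)  # placeholder model for illustration only
    # The corrected, fully qualified path: the wrapper lives in torch.nn.parallel.
    net = torch.nn.parallel.DistributedDataParallel(model)

    out = net(torch.randn(4, 10))
    out.sum().backward()

    dist.destroy_process_group()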
@@ -626,7 +626,7 @@ class DistributedDataParallel(Module):

         Example::

-            >>> ddp = torch.nn.DistributedDataParallel(model, pg)
+            >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
             >>> with ddp.no_sync():
             >>>     for input in inputs:
             >>>         ddp(input).backward()  # no synchronization, accumulate grads
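The second hunk touches the no_sync() example. Below is a hedged, standalone sketch of that gradient-accumulation pattern; the single-process gloo group, port number, toy model, and synthetic micro-batches are illustrative assumptions, not part of the patch.

    import torch
    import torch.distributed as dist
    import torch.nn as nn

    # Standalone single-process group (assumed setup so this runs on CPU).
    dist.init_process_group(backend='gloo',
                            init_method='tcp://127.0.0.1:29501',
                            world_size=1, rank=0)

    ddp = torch.nn.parallel.DistributedDataParallel(nn.Linear(10, 10))
    inputs = [torch.randn(4, 10) for _ in range(3)]  # synthetic micro-batches

    with ddp.no_sync():
        for inp in inputs[:-1]:
            ddp(inp).sum().backward()   # grads accumulate locally, no all-reduce
    ddp(inputs[-1]).sum().backward()    # first backward outside no_sync() syncs grads

    dist.destroy_process_group()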