pytorch/torch/_C/_autograd.pyi
Xu Zhao eaa993a2e0 Add type annotations to torch._C._distributed_rpc module. (#46624)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/46624

Test Plan: Imported from OSS

Reviewed By: glaringlee

Differential Revision: D24761656

Pulled By: xuzhao9

fbshipit-source-id: b55aee5dd2b97f573a50e5bbfddde7d984943fec
2020-11-06 01:28:51 -08:00

from typing import List
from enum import Enum

# Defined in tools/autograd/init.cpp

class ProfilerState(Enum):
    Disable = ...
    CPU = ...
    CUDA = ...
    NVTX = ...

class ProfilerConfig:
    def __init__(
        self,
        state: ProfilerState,
        report_input_shapes: bool,
        profile_memory: bool,
        with_stack: bool
    ) -> None: ...
    ...
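
# Usage sketch (illustrative, not part of the upstream stub): a ProfilerConfig is
# normally constructed for you by torch.autograd.profiler.profile, but built by
# hand it would look roughly like:
#   config = ProfilerConfig(ProfilerState.CPU, True, False, False)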

class ProfilerEvent:
    def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
    def cpu_memory_usage(self) -> int: ...
    def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
    def cuda_memory_usage(self) -> int: ...
    def device(self) -> int: ...
    def handle(self) -> int: ...
    def has_cuda(self) -> bool: ...
    def is_remote(self) -> bool: ...
    def kind(self) -> int: ...
    def name(self) -> str: ...
    def node_id(self) -> int: ...
    def sequence_nr(self) -> int: ...
    def shapes(self) -> List[List[int]]: ...
    def thread_id(self) -> int: ...
    ...

def _enable_profiler(config: ProfilerConfig) -> None: ...
def _disable_profiler() -> List[List[ProfilerEvent]]: ...
def _profiler_enabled() -> bool: ...
def _enable_record_function(enable: bool) -> None: ...
def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
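
# Illustrative sketch (assumption, not part of the upstream stub): these private
# bindings are typically driven by torch.autograd.profiler.profile, which does
# roughly the following around the profiled region:
#   _enable_profiler(ProfilerConfig(ProfilerState.CPU, False, False, False))
#   # ... run the workload being profiled ...
#   thread_event_lists = _disable_profiler()  # roughly one List[ProfilerEvent] per thread
#   assert not _profiler_enabled()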